path
stringlengths
7
265
concatenated_notebook
stringlengths
46
17M
jie/OceanSciences/DrifterParticleImage.ipynb
###Markdown Reproduce drifter ###Code %matplotlib inline from matplotlib import pylab import matplotlib.pyplot as plt import netCDF4 as nc import numpy as np import scipy.io import datetime as dt from salishsea_tools import nc_tools, viz_tools, tidetools, stormtools, bathy_tools,geo_tools from __future__ import division drifters = scipy.io.loadmat('/ocean/mhalvers/research/drifters/SoG_drifters.mat',squeeze_me=True) ubc = drifters['ubc'] grid = nc.Dataset('/ocean/jieliu/research/meopar/nemo-forcing/grid/bathy_meter_SalishSea2.nc','r') bathy = grid.variables['Bathymetry'][:, :] X = grid.variables['nav_lon'][:, :] Y = grid.variables['nav_lat'][:, :] bathyy,X,Y = tidetools.get_SS2_bathy_data() def convert_time(matlab_time_array): "converts a matlab time array to python format" python_time_array=[] for t in matlab_time_array: python_datetime = dt.datetime.fromordinal(int(t)) + dt.timedelta(days=t%1) - dt.timedelta(days = 366) python_time_array.append(python_datetime) python_time_array = np.array(python_time_array) return python_time_array def get_tracks(switch,lats,lons,ptime,in_water): """returns a list of tracks of each buoy, ie a trajectory for each time the buoy was released into the water""" all_tracks=[] for ind in switch: track_on = 1 i = ind track ={'time':[], 'lat':[],'lon':[]} while(track_on): if in_water[i]!=1: track_on=0 elif i==np.shape(in_water)[0]-1: track['time'].append(ptime[i]) track['lat'].append(lats[i]) track['lon'].append(lons[i]) track_on=0 else: track['time'].append(ptime[i]) track['lat'].append(lats[i]) track['lon'].append(lons[i]) i=i+1 all_tracks.append(track) return all_tracks def organize_info(buoy,btype): """ organizes the buoy info. Groups the buoy data into tracks for when it was released into the water. 
""" #creat arrays for easier access buoy_name = btype[buoy][0] lats = btype[buoy]['lat'].flatten() lons = btype[buoy]['lon'].flatten() mtime = btype[buoy]['mtime'] in_water = btype[buoy]['isSub'].flatten() #convert mtime to python datetimes ptime = convert_time(mtime) #loop through in_water flag to find when buoy switched from being out of water to being in water. switch = []; for ind in np.arange(1,in_water.shape[0]): if int(in_water[ind]) != int(in_water[ind-1]): if int(in_water[ind])==1: switch.append(ind) all_tracks=get_tracks(switch,lats,lons,ptime.flatten(),in_water) return buoy_name, all_tracks def find_start(tracks, start_date): """returns the a list of indices for a track released on start date. Only checks the month and day of the start day""" i=0 ind=[] starttimes=[] for t in tracks: if int(t['time'][0].month) == start_date.month: if int(t['time'][0].day) == start_date.day: ind.append(i) i=i+1 return ind def plot_buoy(tracks, startdate, i=0, fancy=False): """ plots a buoy trajectory at the given startdate in an axis, ax. returns the trajectory that was plotted. The first track released on the startdate is plotted. For trajectories that were released mulitples times a day, i selects which release is plotted. 
""" fig,ax = plt.subplots(1,1,figsize=(6,6)) ind =find_start(tracks,startdate) traj=tracks[ind[i]] duration = (traj['time'][-1]-traj['time'][0]).total_seconds()/3600 print ('Released', traj['time'][0], 'at', traj['lat'][0], ',' , traj['lon'][0], 'for' , duration, 'hours') ax.plot(traj['lon'],traj['lat'],'og',label = 'data') #ax.legend(loc='best') ax.plot(traj['lon'][0],traj['lat'][0],'sr') print(float(traj['lon'][0])) #[j,i]=geo_tools.find_closest_model_point(float(traj['lon'][0]),float(traj['lat'][0]),X,Y,land_mask=bathyy.mask()) ax.plot(-123-np.array([18.2, 13.7, 12])/60.,49+np.array([6.4, 8, 7.6])/60.,'-k',lw=2); if fancy: cmap = plt.get_cmap('winter_r') cmap.set_bad('burlywood') ax.pcolormesh(X, Y, bathy, cmap=cmap) ax.set_title('Observed Drift Track') ax.set_xlabel('Longitude') ax.set_ylabel('Latitude') ax.text(-123.15,49.13, "Fraser River", fontsize=12) else: viz_tools.plot_coastline(ax, grid, coords='map') viz_tools.plot_coastline(ax, grid, coords='map',isobath=4) viz_tools.plot_coastline(ax, grid, coords='map',isobath=20) #print ('NEMO coords:', j,i) ax.set_xlim([-123.6,-123]) ax.set_ylim([48.8,49.4]) ax.set_xticks([-123.6, -123.4, -123.2,-123]) ax.set_xticklabels([-123.6, -123.4, -123.2,-123]) ax.set_xlabel('Longitude') ax.set_ylabel('Latitude') ax.legend(loc = 'best') return fig buoy = 2 name, tracks=organize_info(buoy,ubc) print(name) fig=plot_buoy(tracks,dt.datetime(2014,10,8), i=-1) ###Output UBC-I-0003 Released 2014-10-08 16:10:07.000003 at 49.1054983333 , -123.319833333 for 28.750277778333334 hours -123.31983333333334 ###Markdown Produce both togather ###Code def plot_both(tracks, startdate, lon,lat,part,start,end,start_d,end_d,day,hour,minute,duration,i=0, fancy=False): """ plots a buoy trajectory at the given startdate in an axis, ax. returns the trajectory that was plotted. The first track released on the startdate is plotted. For trajectories that were released mulitples times a day, i selects which release is plotted. 
""" fig,axs = plt.subplots(1,2,figsize=(12,6)) ax = axs[0] ind =find_start(tracks,startdate) traj=tracks[ind[i]] duration = (traj['time'][-1]-traj['time'][0]).total_seconds()/3600 print ('Released', traj['time'][0], 'at', traj['lat'][0], ',' , traj['lon'][0], 'for' , duration, 'hours') ax.plot(traj['lon'],traj['lat'],'og',label = 'data') #ax.legend(loc='best') ax.plot(traj['lon'][0],traj['lat'][0],'sr') #[j,i]=tidetools.find_closest_model_point(float(traj['lon'][0]),float(traj['lat'][0]),X,Y,bathy,\ #lon_tol=0.0052,lat_tol=0.00210, allow_land=False) ax.plot(-123-np.array([18.2, 13.7, 12])/60.,49+np.array([6.4, 8, 7.6])/60.,'-k',lw=2); if fancy: cmap = plt.get_cmap('winter_r') cmap.set_bad('burlywood') ax.pcolormesh(X, Y, bathy, cmap=cmap) ax.set_title('Observed Drift Track') ax.set_xlabel('Longitude') ax.set_ylabel('Latitude') ax.text(-123.15,49.13, "Fraser River", fontsize=12) else: viz_tools.plot_coastline(ax, grid, coords='map') viz_tools.plot_coastline(ax, grid, coords='map',isobath=4) viz_tools.plot_coastline(ax, grid, coords='map',isobath=20) #print ('NEMO coords:', j,i) ax = axs[1] viz_tools.plot_coastline(ax,grid,coords='map') viz_tools.plot_coastline(ax,grid,coords='map',isobath=4) viz_tools.plot_coastline(ax,grid,coords='map',isobath=20) colors=['DodgerBlue'] for i, key in enumerate(lon.keys()): ax.scatter(lon[key][1:,part],lat[key][1:,part],marker='o',color=colors[i],label=key) ax.plot(lon[key][0,part],lat[key][0,part],'sr') ax.plot(-123-np.array([18.2,13.7,12])/60.,49+np.array([6.4,8,7.6])/60.,'-k',lw=2,color='SpringGreen') for ax in axs: ax.set_xlim([-123.6,-123]); ax.set_ylim([48.8,49.4]) ax.set_xticks([-123.6, -123.4, -123.2,-123]) ax.set_xticklabels([-123.6, -123.4, -123.2,-123]) ax.set_xlabel('Longitude') ax.set_ylabel('Latitude') ax.legend(loc = 'best') return fig buoy = 2 name, tracks=organize_info(buoy,ubc) fig = plot_both(tracks,dt.datetime(2014,10,8),lon112,lat112,0,'7-Oct-2014','11-Oct-2014',8,10,8,16,0,29,i=-1) lon112={};lat112={} o112 = 
NC.Dataset('/ocean/jieliu/research/meopar/Ariane/result/oct8_101e061e05/drop1/12/ariane_trajectories_qualitative.nc','r') lon112['model']=o112.variables['traj_lon'] lat112['model']=o112.variables['traj_lat'] buoy = 2 name, tracks=organize_info(buoy,ubc) fig = plot_both(tracks,dt.datetime(2014,10,8),lon112,lat112,0,'7-Oct-2014','11-Oct-2014',8,10,8,16,0,29,i=-1) ###Output Released 2014-10-08 16:10:07.000003 at 49.1054983333 , -123.319833333 for 28.750277778333334 hours NEMO coords: 429 290 ###Markdown Reproduce drifter ###Code %matplotlib inline from matplotlib import pylab import matplotlib.pyplot as plt import netCDF4 as nc import numpy as np import scipy.io import datetime as dt from salishsea_tools import nc_tools, viz_tools, tidetools, stormtools, bathy_tools from __future__ import division drifters = scipy.io.loadmat('/ocean/mhalvers/research/drifters/SoG_drifters.mat',squeeze_me=True) ubc = drifters['ubc'] grid = nc.Dataset('/ocean/jieliu/research/meopar/nemo-forcing/grid/bathy_meter_SalishSea2.nc','r') bathy = grid.variables['Bathymetry'][:, :] X = grid.variables['nav_lon'][:, :] Y = grid.variables['nav_lat'][:, :] def convert_time(matlab_time_array): "converts a matlab time array to python format" python_time_array=[] for t in matlab_time_array: python_datetime = dt.datetime.fromordinal(int(t)) + dt.timedelta(days=t%1) - dt.timedelta(days = 366) python_time_array.append(python_datetime) python_time_array = np.array(python_time_array) return python_time_array def get_tracks(switch,lats,lons,ptime,in_water): """returns a list of tracks of each buoy, ie a trajectory for each time the buoy was released into the water""" all_tracks=[] for ind in switch: track_on = 1 i = ind track ={'time':[], 'lat':[],'lon':[]} while(track_on): if in_water[i]!=1: track_on=0 elif i==np.shape(in_water)[0]-1: track['time'].append(ptime[i]) track['lat'].append(lats[i]) track['lon'].append(lons[i]) track_on=0 else: track['time'].append(ptime[i]) track['lat'].append(lats[i]) 
track['lon'].append(lons[i]) i=i+1 all_tracks.append(track) return all_tracks def organize_info(buoy,btype): """ organizes the buoy info. Groups the buoy data into tracks for when it was released into the water. """ #creat arrays for easier access buoy_name = btype[buoy][0] lats = btype[buoy]['lat'].flatten() lons = btype[buoy]['lon'].flatten() mtime = btype[buoy]['mtime'] in_water = btype[buoy]['isSub'].flatten() #convert mtime to python datetimes ptime = convert_time(mtime) #loop through in_water flag to find when buoy switched from being out of water to being in water. switch = []; for ind in np.arange(1,in_water.shape[0]): if int(in_water[ind]) != int(in_water[ind-1]): if int(in_water[ind])==1: switch.append(ind) all_tracks=get_tracks(switch,lats,lons,ptime.flatten(),in_water) return buoy_name, all_tracks def find_start(tracks, start_date): """returns the a list of indices for a track released on start date. Only checks the month and day of the start day""" i=0 ind=[] starttimes=[] for t in tracks: if int(t['time'][0].month) == start_date.month: if int(t['time'][0].day) == start_date.day: ind.append(i) i=i+1 return ind def plot_buoy(tracks, startdate, i=0, fancy=False): """ plots a buoy trajectory at the given startdate in an axis, ax. returns the trajectory that was plotted. The first track released on the startdate is plotted. For trajectories that were released mulitples times a day, i selects which release is plotted. 
""" fig,ax = plt.subplots(1,1,figsize=(6,6)) ind =find_start(tracks,startdate) traj=tracks[ind[i]] duration = (traj['time'][-1]-traj['time'][0]).total_seconds()/3600 print ('Released', traj['time'][0], 'at', traj['lat'][0], ',' , traj['lon'][0], 'for' , duration, 'hours') ax.plot(traj['lon'],traj['lat'],'og') #ax.legend(loc='best') ax.plot(traj['lon'][0],traj['lat'][0],'sr') [j,i]=tidetools.find_closest_model_point(float(traj['lon'][0]),float(traj['lat'][0]),X,Y,bathy,\ lon_tol=0.0052,lat_tol=0.00210, allow_land=False) ax.plot(-123-np.array([18.2, 13.7, 12])/60.,49+np.array([6.4, 8, 7.6])/60.,'-k',lw=2); if fancy: cmap = plt.get_cmap('winter_r') cmap.set_bad('burlywood') ax.pcolormesh(X, Y, bathy, cmap=cmap) ax.set_title('Observed Drift Track') ax.set_xlabel('Longitude') ax.set_ylabel('Latitude') ax.text(-123.15,49.13, "Fraser River", fontsize=12) else: viz_tools.plot_coastline(ax, grid, coords='map') viz_tools.plot_coastline(ax, grid, coords='map',isobath=4) viz_tools.plot_coastline(ax, grid, coords='map',isobath=20) print ('NEMO coords:', j,i) ax.set_xlim([-123.6,-123]) ax.set_ylim([48.8,49.4]) ax.set_xticks([-123.6, -123.4, -123.2,-123]) ax.set_xticklabels([-123.6, -123.4, -123.2,-123]) ax.set_xlabel('Longitude') ax.set_ylabel('Latitude') return fig mpl.rcParams.update({'font.size': 14}) mpl.rcParams["axes.formatter.useoffset"] = False buoy = 2 name, tracks=organize_info(buoy,ubc) print(name) fig=plot_buoy(tracks,dt.datetime(2014,10,8), i=-1) ###Output UBC-I-0003 Released 2014-10-08 16:10:07.000003 at 49.1054983333 , -123.319833333 for 28.750277778333334 hours NEMO coords: 429 290 ###Markdown Produce both togather ###Code def plot_both(tracks, startdate, lon,lat,part,start,end,start_d,end_d,day,hour,minute,duration,i=0, fancy=False): """ plots a buoy trajectory at the given startdate in an axis, ax. returns the trajectory that was plotted. The first track released on the startdate is plotted. 
For trajectories that were released mulitples times a day, i selects which release is plotted. """ fig,axs = plt.subplots(1,2,figsize=(12,6)) ax = axs[0] ind =find_start(tracks,startdate) traj=tracks[ind[i]] duration = (traj['time'][-1]-traj['time'][0]).total_seconds()/3600 print ('Released', traj['time'][0], 'at', traj['lat'][0], ',' , traj['lon'][0], 'for' , duration, 'hours') ax.plot(traj['lon'],traj['lat'],'og',label = 'data') #ax.legend(loc='best') ax.plot(traj['lon'][0],traj['lat'][0],'sr') [j,i]=tidetools.find_closest_model_point(float(traj['lon'][0]),float(traj['lat'][0]),X,Y,bathy,\ lon_tol=0.0052,lat_tol=0.00210, allow_land=False) ax.plot(-123-np.array([18.2, 13.7, 12])/60.,49+np.array([6.4, 8, 7.6])/60.,'-k',lw=2); if fancy: cmap = plt.get_cmap('winter_r') cmap.set_bad('burlywood') ax.pcolormesh(X, Y, bathy, cmap=cmap) ax.set_title('Observed Drift Track') ax.set_xlabel('Longitude') ax.set_ylabel('Latitude') ax.text(-123.15,49.13, "Fraser River", fontsize=12) else: viz_tools.plot_coastline(ax, grid, coords='map') viz_tools.plot_coastline(ax, grid, coords='map',isobath=4) viz_tools.plot_coastline(ax, grid, coords='map',isobath=20) print ('NEMO coords:', j,i) ax = axs[1] viz_tools.plot_coastline(ax,grid,coords='map') viz_tools.plot_coastline(ax,grid,coords='map',isobath=4,color='DarkViolet') viz_tools.plot_coastline(ax,grid,coords='map',isobath=20,color='OrangeRed') colors=['DodgerBlue'] for i, key in enumerate(lon.keys()): ax.scatter(lon[key][1:,part],lat[key][1:,part],marker='o',color=colors[i],label=key) ax.scatter(lon[key][0,part],lat[key][0,part],color='0.30',marker='s') ax.plot(-123-np.array([18.2,13.7,12])/60.,49+np.array([6.4,8,7.6])/60.,'-k',lw=2,color='SpringGreen') for ax in axs: ax.set_xlim([-123.6,-123]); ax.set_ylim([48.8,49.4]) ax.set_xticks([-123.6, -123.4, -123.2,-123]) ax.set_xticklabels([-123.6, -123.4, -123.2,-123]) ax.set_xlabel('Longitude') ax.set_ylabel('Latitude') ax.legend(loc = 2) return fig lon112={};lat112={} o112 = 
NC.Dataset('/ocean/jieliu/research/meopar/Ariane/result/oct8_101e061e05/drop1/12/ariane_trajectories_qualitative.nc','r') lon112['model']=o112.variables['traj_lon'] lat112['model']=o112.variables['traj_lat'] mpl.rcParams.update({'font.size': 14}) mpl.rcParams["axes.formatter.useoffset"] = False buoy = 2 name, tracks=organize_info(buoy,ubc) fig = plot_both(tracks,dt.datetime(2014,10,8),lon112,lat112,0,'7-Oct-2014','11-Oct-2014',8,10,8,16,0,29,i=-1) ###Output Released 2014-10-08 16:10:07.000003 at 49.1054983333 , -123.319833333 for 28.750277778333334 hours NEMO coords: 429 290
Lessons/Pyspark_Notebooks/Pyspark_array_manipulation_2.ipynb
###Markdown Difference Between map and flatmap ###Code values = sc.parallelize([1, 2, 3, 4], 2) print(values.map(range).collect()) # [[0], [0, 1], [0, 1, 2], [0, 1, 2, 3]] print(values.flatMap(range).collect()) # [0, 0, 1, 0, 1, 2, 0, 1, 2, 3] ###Output [range(0, 1), range(0, 2), range(0, 3), range(0, 4)] [0, 0, 1, 0, 1, 2, 0, 1, 2, 3] ###Markdown Converting from Pyspark to conventional Python functionKeep in mind, the Pyspark way and map reduce ways are still faster on large datasets! ###Code def big_sum(ls): '''Same as above, example 1''' ls = [x*2 for x in ls if (x*2)%4 == 0] summation = sum(ls) return math.sqrt(summation) big_sum(range(100_000)) def f(ls): '''same as above, example 2''' s = 0 for i in ls: # only the values whose square is divisible by 4, gets added if (i*2)%4 == 0: s += (i*2) return math.sqrt(s) print(f(range(100000))) ###Output 70709.97100833799 ###Markdown Difference Between map and flatmap ###Code values = sc.parallelize([1, 2, 3, 4], 2) print(values.map(range).collect()) # ranges an RDD as a 2D list, and in this case what goes in each is a range object, after each value in original is given to the range function # [[0], [0, 1], [0, 1, 2], [0, 1, 2, 3]] print(values.flatMap(range).collect()) # flattens the data to a 1D list # [0, 0, 1, 0, 1, 2, 0, 1, 2, 3] ###Output [range(0, 1), range(0, 2), range(0, 3), range(0, 4)] [0, 0, 1, 0, 1, 2, 0, 1, 2, 3] ###Markdown Difference Between map and flatmap ###Code values = sc.parallelize([1, 2, 3, 4], 2) print(values.map(range).collect()) # [[0], [0, 1], [0, 1, 2], [0, 1, 2, 3]] print(values.flatMap(range).collect()) # [0, 0, 1, 0, 1, 2, 0, 1, 2, 3] ###Output [range(0, 1), range(0, 2), range(0, 3), range(0, 4)] [0, 0, 1, 0, 1, 2, 0, 1, 2, 3]
data_modelling/01_indroduction_to_data_modelling/exercises/L1_Exercise_2_Creating_a_Table_with_Apache_Cassandra.ipynb
###Markdown Lesson 1 Exercise 2: Creating a Table with Apache Cassandra Walk through the basics of Apache Cassandra. Complete the following tasks: Create a table in Apache Cassandra, Insert rows of data, Run a simple SQL query to validate the information. `` denotes where the code needs to be completed. Note: __Do not__ click the blue Preview button in the lower taskbar Import Apache Cassandra python package ###Code import cassandra ###Output _____no_output_____ ###Markdown Create a connection to the database ###Code from cassandra.cluster import Cluster try: cluster = Cluster(['127.0.0.1']) #If you have a locally installed Apache Cassandra instance session = cluster.connect() except Exception as e: print(e) ###Output _____no_output_____ ###Markdown TO-DO: Create a keyspace to do the work in ###Code ## TO-DO: Create the keyspace try: session.execute(""" CREATE KEYSPACE IF NOT EXISTS udacity WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }""" ) except Exception as e: print(e) ###Output _____no_output_____ ###Markdown TO-DO: Connect to the Keyspace ###Code ## To-Do: Add in the keyspace you created try: session.set_keyspace('udacity') except Exception as e: print(e) ###Output _____no_output_____ ###Markdown Create a Song Library that contains a list of songs, including the song name, artist name, year, album it was from, and if it was a single. 
`song_titleartist_nameyearalbum_namesingle` TO-DO: You need to create a table to be able to run the following query: `select * from songs WHERE year=1970 AND artist_name="The Beatles"` ###Code ## TO-DO: Complete the query below query = "CREATE TABLE IF NOT EXISTS music_library " query = query + "(song_title text, artist_name text, year int, album_name text, single text, PRIMARY KEY (year, artist_name))" try: session.execute(query) except Exception as e: print(e) ###Output _____no_output_____ ###Markdown TO-DO: Insert the following two rows in your table`First Row: "Across The Universe", "The Beatles", "1970", "False", "Let It Be"``Second Row: "The Beatles", "Think For Yourself", "False", "1965", "Rubber Soul"` ###Code ## Add in query and then run the insert statement query = "INSERT INTO music_library (song_title, artist_name, year, single, album_name)" query = query + " VALUES (%s, %s, %s, %s, %s)" try: session.execute(query, ("Across The Universe", "The Beatles", 1970, "False", "Let It Be")) except Exception as e: print(e) try: session.execute(query, ("Think For Yourself", "The Beatles", 1965, "False", "Rubber Soul")) except Exception as e: print(e) ###Output _____no_output_____ ###Markdown TO-DO: Validate your data was inserted into the table. 
###Code ## TO-DO: Complete and then run the select statement to validate the data was inserted into the table query = 'SELECT * FROM music_library' try: rows = session.execute(query) except Exception as e: print(e) for row in rows: print (row.year, row.album_name, row.artist_name) ###Output 1965 Rubber Soul The Beatles 1970 Let It Be The Beatles ###Markdown TO-DO: Validate the Data Model with the original query.`select * from songs WHERE YEAR=1970 AND artist_name="The Beatles"` ###Code ##TO-DO: Complete the select statement to run the query query = "drop table music_library" try: rows = session.execute(query) except Exception as e: print(e) for row in rows: print (row.year, row.album_name, row.artist_name) ###Output _____no_output_____ ###Markdown And Finally close the session and cluster connection ###Code session.shutdown() cluster.shutdown() ###Output _____no_output_____
docs/writecustom.ipynb
###Markdown Writing Custom Models Writing custom theoretical models is a powerful, extensible option of the LamAna package. Authoring Custom ModelsCustom models are simple `.py` files that can be locally placed by the user into the models directory. The API finds these selected files from the `apply(model='')` method in the `distributions.Case` class. In order for these processes to work smoothly, the following essentials are needed to "handshake" with `theories` module. 1. Implement a `_use_model_()` hook that returns (at minimum) an updated DataFrame.1. If using the class-style models, implement `_use_model_()` hook within a class that inherits from `theories.BaseModel`.Exceptions for specific models are maintained by the models author. Which style do I implement?- For beginners, function-style models are the best way to start making custom models.- We recommend class-style models, which use object-oriented principles such as inheritance. This is best suited for intermediate Pythonistas, which we encourage everyone to consider acheiving. :) Examples of both function-style and class-style models are found in the ["examples"](https://github.com/par2/lamana/tree/develop/examples) folder of the repository.The following cell shows an excerpt of the class-style model. ```python------------------------------------------------------------------------------ Class-style model ...class Model(BaseModel): '''A custom CLT model. A modified laminate theory for circular biaxial flexure disks, loaded with a flat piston punch on 3-ball support having two distinct materials (polymer and ceramic). ''' def __init__(self): self.Laminate = None self.FeatureInput = None self.LaminateModel = None def _use_model_(self, Laminate, adjusted_z=False): '''Return updated DataFrame and FeatureInput. ... Returns ------- tuple The updated calculations and parameters stored in a tuple `(LaminateModel, FeatureInput)``. df : DataFrame LaminateModel with IDs and Dimensional Variables. 
FeatureInut : dict Geometry, laminate parameters and more. Updates Globals dict for parameters in the dashboard output. ''' ... return (df, FeatureInput) Add Defaults here``` ###Code .. note:: DEV: If testing with both function- and class-styles, keep in mind any changes to the model should be reflected in both styles. ###Output _____no_output_____ ###Markdown What are `Defaults`? Recall there are a set of **geometric, loading and material parameters** that are required to run LT calculations. During analysis, retyping these parameters may become tedious each time you wish to run a simple plot or test parallel case. Therefore, you can prepare variables that store default parameters with specific values.LamAna eases this process by simply inheriting from `BaseDefaults`. The `BaseDefaults` class stores a number of common *geometry strings*, *Geometry objects* and arbitrary *loading parameters*/*material properties*. These values are intended to get you started, but you can alter to fit your better suit model. In addition, this class has methods for easily building formatted *FeatureInput* objects. ```pythonclass Defaults(BaseDefaults): '''Return parameters for building distributions cases. Useful for consistent testing. Dimensional defaults are inherited from utils.BaseDefaults(). Material-specific parameters are defined here by he user. - Default geometric parameters - Default material properties - Default FeatureInput Examples ======== >>> dft = Defaults() >>> dft.load_params {'R' : 12e-3, 'a' : 7.5e-3, 'p' : 1, 'P_a' : 1, 'r' : 2e-4,} >>> dft.mat_props {'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9}, 'Poissons': {'HA': 0.25, 'PSu': 0.33}} >>> dft.FeatureInput {'Geometry' : '400-[200]-800', 'Geometric' : {'R' : 12e-3, 'a' : 7.5e-3, 'p' : 1, 'P_a' : 1, 'r' : 2e-4,}, 'Materials' : {'HA' : [5.2e10, 0.25], 'PSu' : [2.7e9, 0.33],}, 'Custom' : None, 'Model' : Wilson_LT} ''' def __init__(self): BaseDefaults.__init__(self) '''DEV: Add defaults first. 
Then adjust attributes.''' DEFAULTS ------------------------------------------------------------ Build dicts of geometric and material parameters self.load_params = { 'R': 12e-3, specimen radius 'a': 7.5e-3, support ring radius 'p': 5, points/layer 'P_a': 1, applied load 'r': 2e-4, radial distance from center loading } self.mat_props = { 'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9}, 'Poissons': {'HA': 0.25, 'PSu': 0.33} } ATTRIBUTES ---------------------------------------------------------- FeatureInput self.FeatureInput = self.get_FeatureInput( self.Geo_objects['standard'][0], load_params=self.load_params, mat_props=self.mat_props, model='Wilson_LT', global_vars=None )``` ###Code .. see also: The latter guidelines are used for authoring custom models on your local machine. If you would like to share you model, see the `Contributions: As an Author <contribution>_` section for more details. ###Output _____no_output_____
example/Example-a3667-Chandra.ipynb
###Markdown Load data Initialize an `ObservationList` object. ###Code observations = sbfit.ObservationList() ###Output _____no_output_____ ###Markdown Load images into the `ObservationList` object. ###Code image_dir = "a3667/chandra" obsids = [513, 889, 5751, 5752, 5753, 6292, 6295, 6296] for obsid in obsids: observations.add_observation_from_file(f"a3667/chandra/{obsid}_band1_thresh.img", f"a3667/chandra/{obsid}_band1_thresh.expmap", f"a3667/chandra/{obsid}_band1_nxb_full.img", bkg_norm_type="count", bkg_norm_keyword="bkgnorm", ) ###Output _____no_output_____ ###Markdown Read region file ###Code epanda = sbfit.read_region("a3667.reg") ###Output _____no_output_____ ###Markdown Extract a profileThe region set loaded in the previous step is used.The `channel_width` is the size of the radius grid for a profile. The value should be less than the psf width. ###Code a3667_chandra_profile = observations.get_profile(epanda, channel_width=0.5) ###Output WARNING: FITSFixedWarning: RADECSYS= 'ICRS ' / default the RADECSYS keyword is deprecated, use RADESYSa. [astropy.wcs.wcs] WARNING: FITSFixedWarning: 'datfix' made the change 'Set DATEREF to '1998-01-01' from MJDREF. Set MJD-END to 51444.513808 from DATE-END'. [astropy.wcs.wcs] WARNING: FITSFixedWarning: 'datfix' made the change 'Set DATEREF to '1998-01-01' from MJDREF. Set MJD-END to 51797.187928 from DATE-END'. [astropy.wcs.wcs] WARNING: FITSFixedWarning: 'datfix' made the change 'Set DATEREF to '1998-01-01' from MJDREF. Set MJD-END to 53529.557755 from DATE-END'. [astropy.wcs.wcs] WARNING: FITSFixedWarning: 'datfix' made the change 'Set DATEREF to '1998-01-01' from MJDREF. Set MJD-END to 53534.009225 from DATE-END'. [astropy.wcs.wcs] WARNING: FITSFixedWarning: 'datfix' made the change 'Set DATEREF to '1998-01-01' from MJDREF. Set MJD-END to 53539.451968 from DATE-END'. [astropy.wcs.wcs] WARNING: FITSFixedWarning: 'datfix' made the change 'Set DATEREF to '1998-01-01' from MJDREF. Set MJD-END to 53532.193692 from DATE-END'. 
[astropy.wcs.wcs] WARNING: FITSFixedWarning: 'datfix' made the change 'Set DATEREF to '1998-01-01' from MJDREF. Set MJD-END to 53536.858264 from DATE-END'. [astropy.wcs.wcs] WARNING: FITSFixedWarning: 'datfix' made the change 'Set DATEREF to '1998-01-01' from MJDREF. Set MJD-END to 53541.426528 from DATE-END'. [astropy.wcs.wcs] ###Markdown First, let's bin the profile. ###Code a3667_chandra_profile.rebin(130, 250,method="lin",min_cts=200,log_width=0.005,lin_width=1) a3667_chandra_profile.plot(scale="loglog") ###Output /Users/xyzhang/anaconda3/envs/SBFit/lib/python3.7/site-packages/sbfit-0.1.2-py3.7.egg/sbfit/profile.py:506: UserWarning: No model set. ###Markdown - For *Chandra* observations, the PSF is small enough to use an identity smoothing matrix, which is the default setting.- For *XMM-Newton*, a King profile smoothing matrix is essential to account for the broad PSF. The parameters of the profile are provided in the PSF calibration files. ###Code a3667_chandra_profile.set_smooth_matrix("identity", king_alpha=1.4,king_rc=10,sigma=1) ###Output _____no_output_____ ###Markdown Set modelsSet a double power law model and load it into the profile.The `+` operator can add multiple model instances into a compound model instance.Here we use the combination of a `DoublePowerLaw` model and a `Constant` model, which represents the ICM emission and the X-ray sky background. ###Code dpl = sbfit.model.DoublePowerLaw() cst = sbfit.model.Constant() a3667_chandra_profile.set_model(dpl + cst) print(a3667_chandra_profile.model) ###Output Model: CompoundModel Inputs: ('x',) Outputs: ('y',) Model set size: 1 Expression: [0] + [1] Components: [0]: <DoublePowerLaw(norm=1., a1=0.1, a2=1., r=1., c=2.)> [1]: <Constant(norm=0.)> Parameters: norm_0 a1_0 a2_0 r_0 c_0 norm_1 ------ ---- ---- --- --- ------ 1.0 0.1 1.0 1.0 2.0 0.0 ###Markdown Before fit, set initial parameters that make the model profile close to the observed profile. 
###Code a3667_chandra_profile.model.norm_0 = 4e-4 a3667_chandra_profile.model.a1_0 = 0 a3667_chandra_profile.model.a2_0 = 0.6 a3667_chandra_profile.model.r_0 = 191 a3667_chandra_profile.model.c_0 = 2.4 a3667_chandra_profile.model.norm_1 = 5e-7 a3667_chandra_profile.calculate() a3667_chandra_profile.plot() ###Output _____no_output_____ ###Markdown Set parameter constraints for the model in case that the optimizer goes too far. ###Code a3667_chandra_profile.model.norm_1.fixed = True a3667_chandra_profile.model.norm_0.bounds = (1e-4, 6e-4) a3667_chandra_profile.model.a1_0.bounds = (-0.7, 0.3) a3667_chandra_profile.model.a2_0.bounds = (0.4, 1.1) a3667_chandra_profile.model.r_0.bounds = (150, 220) a3667_chandra_profile.model.c_0.bounds = (1.8, 3.3) ###Output _____no_output_____ ###Markdown Fit ###Code a3667_chandra_profile.fit(show_step=True, tolerance=0.01) p_bin2 = a3667_chandra_profile.deepcopy() p_bin2.rebin(130, 250,method="lin",min_cts=200,log_width=0.005,lin_width=2) p_bin2.fit(show_step=True) ###Output Start fit C-stat: 68.426 [3.98619753e-04 7.83445097e-02 5.39570169e-01 1.91250219e+02 2.47354035e+00] C-stat: 68.405 [3.97761118e-04 7.71829563e-02 5.38463770e-01 1.91285037e+02 2.47927233e+00] C-stat: 68.405 [3.97719203e-04 7.71599546e-02 5.38401152e-01 1.91286173e+02 2.47954135e+00] Iteration terminated. Degree of freedom: 55; C-stat: 68.4048 norm_0: 3.98e-04 a1_0: 7.72e-02 a2_0: 5.38e-01 r_0: 1.91e+02 c_0: 2.48e+00 Uncertainties from rough estimation: norm_0: 2.973e-05 a1_0: 5.231e-02 a2_0: 5.086e-02 r_0: 4.137e-01 c_0: 1.881e-01 ###Markdown The uncertainties here are obtained from the Hessian matrix in the fit routine. To better estimate the uncertainties, we need to perform a Monte-Carlo Markov Chain analysis.Let's have a look of the best-fit profile first. ###Code a3667_chandra_profile.calculate() a3667_chandra_profile.plot(scale="loglog") ###Output _____no_output_____ ###Markdown Now we use Monte-Carlo Markov Chain method to estimate the uncertainties. 
It takes hours to finish. ###Code a3667_chandra_profile.mcmc_error(nsteps=5000, burnin=500) ###Output 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 5000/5000 [4:14:59<00:00, 3.06s/it] ###Markdown The corner plot ###Code a3667_chandra_profile.plot(plot_type="contour") ###Output _____no_output_____ ###Markdown Let's have a look how the MCMC chains walk. ###Code a3667_chandra_profile.plot(plot_type="mcmc_chain") ###Output _____no_output_____ ###Markdown The uncertainties are stored in the `error` attribute. ###Code print(a3667_chandra_profile.error) a3667_chandra_profile.calculate() ###Output /Users/xyzhang/anaconda3/envs/my/lib/python3.7/site-packages/sbfit-0.1.2-py3.7.egg/sbfit/profile.py:354: RuntimeWarning: invalid value encountered in true_divide /Users/xyzhang/anaconda3/envs/my/lib/python3.7/site-packages/sbfit-0.1.2-py3.7.egg/sbfit/statistics.py:31: RuntimeWarning: invalid value encountered in true_divide
2. Installation of Numpy.ipynb
###Markdown
Installing via pip

Most major projects upload official packages to the Python Package Index. They can be installed on most operating systems using Python's standard pip package manager.

Note that you need to have Python and pip already installed on your system.

You can install packages via commands such as:

###Code
python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose

###Output
_____no_output_____
###Markdown
Windows

###Code
pip install numpy

We recommend using a user install, using the --user flag to pip (note: do not use sudo pip, which can cause problems). This installs packages for your local user, and does not write to the system directories.

###Output
_____no_output_____
###Markdown
Install system-wide via a Linux package manager

Users on Linux can install packages from repositories provided by the distributions. These installations will be system-wide, and may have older package versions than those available using pip.

###Code
sudo apt-get install python-numpy python-scipy python-matplotlib ipython ipython-notebook python-pandas python-sympy python-nose

###Output
_____no_output_____
###Markdown
Fedora

###Code
Fedora 22 and later:

sudo dnf install numpy scipy python-matplotlib ipython python-pandas sympy python-nose atlas-devel

###Output
_____no_output_____
Big-Data-Clusters/CU2/Public/content/cert-management/cer043-install-controller-cert.ipynb
###Markdown CER043 - Install signed Controller certificate==============================================This notebook installs into the Big Data Cluster the certificate signedusing:- [CER033 - Sign Controller certificate with cluster Root CA](../cert-management/cer033-sign-controller-generated-cert.ipynb)NOTE: At the end of this notebook the Controller pod and all pods thatuse PolyBase (Master Pool and Compute Pool pods) will be restarted toload the new certificates.Steps----- Parameters ###Code app_name = "controller" scaledset_name = "control" container_name = "controller" prefix_keyfile_name = "controller" common_name = "controller-svc" test_cert_store_root = "/var/opt/secrets/test-certificates" ###Output _____no_output_____ ###Markdown Common functionsDefine helper functions used in this notebook. ###Code # Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows import sys import os import re import json import platform import shlex import shutil import datetime from subprocess import Popen, PIPE from IPython.display import Markdown retry_hints = {} # Output in stderr known to be transient, therefore automatically retry error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help install_hint = {} # The SOP to help install the executable if it cannot be found first_run = True rules = None debug_logging = False def run(cmd, return_output=False, no_output=False, retry_count=0): """Run shell command, stream stdout, print stderr and optionally return output NOTES: 1. 
Commands that need this kind of ' quoting on Windows e.g.: kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name} Need to actually pass in as '"': kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name} The ' quote approach, although correct when pasting into Windows cmd, will hang at the line: `iter(p.stdout.readline, b'')` The shlex.split call does the right thing for each platform, just use the '"' pattern for a ' """ MAX_RETRIES = 5 output = "" retry = False global first_run global rules if first_run: first_run = False rules = load_rules() # When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see: # # ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)') # if platform.system() == "Windows" and cmd.startswith("azdata sql query"): cmd = cmd.replace("\n", " ") # shlex.split is required on bash and for Windows paths with spaces # cmd_actual = shlex.split(cmd) # Store this (i.e. kubectl, python etc.) 
to support binary context aware error_hints and retries # user_provided_exe_name = cmd_actual[0].lower() # When running python, use the python in the ADS sandbox ({sys.executable}) # if cmd.startswith("python "): cmd_actual[0] = cmd_actual[0].replace("python", sys.executable) # On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail # with: # # UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128) # # Setting it to a default value of "en_US.UTF-8" enables pip install to complete # if platform.system() == "Darwin" and "LC_ALL" not in os.environ: os.environ["LC_ALL"] = "en_US.UTF-8" # When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc` # if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ: cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc") # To aid supportabilty, determine which binary file will actually be executed on the machine # which_binary = None # Special case for CURL on Windows. The version of CURL in Windows System32 does not work to # get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance # of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost # always the first curl.exe in the path, and it can't be uninstalled from System32, so here we # look for the 2nd installation of CURL in the path) if platform.system() == "Windows" and cmd.startswith("curl "): path = os.getenv('PATH') for p in path.split(os.path.pathsep): p = os.path.join(p, "curl.exe") if os.path.exists(p) and os.access(p, os.X_OK): if p.lower().find("system32") == -1: cmd_actual[0] = p which_binary = p break # Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this # seems to be required for .msi installs of azdata.cmd/az.cmd. 
(otherwise Popen returns FileNotFound) # # NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split. # if which_binary == None: which_binary = shutil.which(cmd_actual[0]) if which_binary == None: if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None: display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") else: cmd_actual[0] = which_binary start_time = datetime.datetime.now().replace(microsecond=0) print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)") print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})") print(f" cwd: {os.getcwd()}") # Command-line tools such as CURL and AZDATA HDFS commands output # scrolling progress bars, which causes Jupyter to hang forever, to # workaround this, use no_output=True # # Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait # wait = True try: if no_output: p = Popen(cmd_actual) else: p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1) with p.stdout: for line in iter(p.stdout.readline, b''): line = line.decode() if return_output: output = output + line else: if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file regex = re.compile(' "(.*)"\: "(.*)"') match = regex.match(line) if match: if match.group(1).find("HTML") != -1: display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"')) else: display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"')) wait = False break # otherwise infinite hang, have not worked out why yet. 
else: print(line, end='') if rules is not None: apply_expert_rules(line) if wait: p.wait() except FileNotFoundError as e: if install_hint is not None: display(Markdown(f'HINT: Use {install_hint} to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait() if not no_output: for line in iter(p.stderr.readline, b''): try: line_decoded = line.decode() except UnicodeDecodeError: # NOTE: Sometimes we get characters back that cannot be decoded(), e.g. # # \xa0 # # For example see this in the response from `az group create`: # # ERROR: Get Token request returned http error: 400 and server # response: {"error":"invalid_grant",# "error_description":"AADSTS700082: # The refresh token has expired due to inactivity.\xa0The token was # issued on 2018-10-25T23:35:11.9832872Z # # which generates the exception: # # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte # print("WARNING: Unable to decode stderr line, printing raw bytes:") print(line) line_decoded = "" pass else: # azdata emits a single empty line to stderr when doing an hdfs cp, don't # print this empty "ERR:" as it confuses. 
# if line_decoded == "": continue print(f"STDERR: {line_decoded}", end='') if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"): exit_code_workaround = 1 # inject HINTs to next TSG/SOP based on output in stderr # if user_provided_exe_name in error_hints: for error_hint in error_hints[user_provided_exe_name]: if line_decoded.find(error_hint[0]) != -1: display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.')) # apply expert rules (to run follow-on notebooks), based on output # if rules is not None: apply_expert_rules(line_decoded) # Verify if a transient error, if so automatically retry (recursive) # if user_provided_exe_name in retry_hints: for retry_hint in retry_hints[user_provided_exe_name]: if line_decoded.find(retry_hint) != -1: if retry_count < MAX_RETRIES: print(f"RETRY: {retry_count} (due to: {retry_hint})") retry_count = retry_count + 1 output = run(cmd, return_output=return_output, retry_count=retry_count) if return_output: return output else: return elapsed = datetime.datetime.now().replace(microsecond=0) - start_time # WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so # don't wait here, if success known above # if wait: if p.returncode != 0: raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n') else: if exit_code_workaround !=0 : raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n') print(f'\nSUCCESS: {elapsed}s elapsed.\n') if return_output: return output def load_json(filename): """Load a json file from disk and return the contents""" with open(filename, encoding="utf8") as json_file: return json.load(json_file) def load_rules(): """Load any 'expert rules' from the metadata of this notebook (.ipynb) that 
should be applied to the stderr of the running executable""" try: # Load this notebook as json to get access to the expert rules in the notebook metadata. # j = load_json("cer043-install-controller-cert.ipynb") except: pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename? else: if "metadata" in j and \ "azdata" in j["metadata"] and \ "expert" in j["metadata"]["azdata"] and \ "rules" in j["metadata"]["azdata"]["expert"]: rules = j["metadata"]["azdata"]["expert"]["rules"] rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first. # print (f"EXPERT: There are {len(rules)} rules to evaluate.") return rules def apply_expert_rules(line): """Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so inject a 'HINT' to the follow-on SOP/TSG to run""" global rules for rule in rules: # rules that have 9 elements are the injected (output) rules (the ones we want). Rules # with only 8 elements are the source (input) rules, which are not expanded (i.e. TSG029, # not ../repair/tsg029-nb-name.ipynb) if len(rule) == 9: notebook = rule[1] cell_type = rule[2] output_type = rule[3] # i.e. stream or error output_type_name = rule[4] # i.e. ename or name output_type_value = rule[5] # i.e. SystemExit or stdout details_name = rule[6] # i.e. evalue or text expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it! 
if debug_logging: print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.") if re.match(expression, line, re.DOTALL): if debug_logging: print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook)) match_found = True display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.')) print('Common functions defined successfully.') # Hints for binary (transient fault) retry, (known) error and install guide # retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond']} error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['no such host', 'TSG011 - Restart sparkhistory server', '../repair/tsg011-restart-sparkhistory-server.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']]} install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb']} ###Output _____no_output_____ ###Markdown Get the Kubernetes namespace for the big data clusterGet the namespace of the Big Data Cluster use the kubectl command lineinterface .**NOTE:**If there is more than one Big Data Cluster in the target Kubernetescluster, then either:- set \[0\] to the correct value for the big data cluster.- set the environment variable AZDATA\_NAMESPACE, before starting Azure Data Studio. 
###Code # Place Kubernetes namespace name for BDC into 'namespace' variable if "AZDATA_NAMESPACE" in os.environ: namespace = os.environ["AZDATA_NAMESPACE"] else: try: namespace = run(f'kubectl get namespace --selector=MSSQL_CLUSTER -o jsonpath={{.items[0].metadata.name}}', return_output=True) except: from IPython.display import Markdown print(f"ERROR: Unable to find a Kubernetes namespace with label 'MSSQL_CLUSTER'. SQL Server Big Data Cluster Kubernetes namespaces contain the label 'MSSQL_CLUSTER'.") display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.')) display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.')) display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.')) raise print(f'The SQL Server Big Data Cluster Kubernetes namespace is: {namespace}') ###Output _____no_output_____ ###Markdown Create a temporary directory to stage files ###Code # Create a temporary directory to hold configuration files import tempfile temp_dir = tempfile.mkdtemp() print(f"Temporary directory created: {temp_dir}") ###Output _____no_output_____ ###Markdown Helper function to save configuration files to disk ###Code # Define helper function 'save_file' to save configuration files to the temporary directory created above import os import io def save_file(filename, contents): with io.open(os.path.join(temp_dir, filename), "w", encoding='utf8', newline='\n') as text_file: text_file.write(contents) print("File saved: " + os.path.join(temp_dir, filename)) print("Function `save_file` defined successfully.") ###Output _____no_output_____ ###Markdown Get name of the โ€˜Runningโ€™ `controller` `pod` ###Code # Place the name of the 'Running' controller pod in variable `controller` controller = run(f'kubectl get pod 
--selector=app=controller -n {namespace} -o jsonpath={{.items[0].metadata.name}} --field-selector=status.phase=Running', return_output=True) print(f"Controller pod name: {controller}") ###Output _____no_output_____ ###Markdown Copy certifcate files from `controller` to local machine ###Code import os cwd = os.getcwd() os.chdir(temp_dir) # Workaround kubectl bug on Windows, can't put c:\ on kubectl cp cmd line run(f'kubectl cp {controller}:{test_cert_store_root}/{app_name}/{prefix_keyfile_name}-certificate.p12 {prefix_keyfile_name}-certificate.p12 -c controller -n {namespace}') run(f'kubectl cp {controller}:{test_cert_store_root}/{app_name}/{prefix_keyfile_name}-certificate.pem {prefix_keyfile_name}-certificate.pem -c controller -n {namespace}') run(f'kubectl cp {controller}:{test_cert_store_root}/{app_name}/{prefix_keyfile_name}-privatekey.pem {prefix_keyfile_name}-privatekey.pem -c controller -n {namespace}') os.chdir(cwd) ###Output _____no_output_____ ###Markdown Copy certifcate files from local machine to `controldb` ###Code import os cwd = os.getcwd() os.chdir(temp_dir) # Workaround kubectl bug on Windows, can't put c:\ on kubectl cp cmd line run(f'kubectl cp {prefix_keyfile_name}-certificate.p12 controldb-0:/var/opt/mssql/{prefix_keyfile_name}-certificate.p12 -c mssql-server -n {namespace}') run(f'kubectl cp {prefix_keyfile_name}-certificate.pem controldb-0:/var/opt/mssql/{prefix_keyfile_name}-certificate.pem -c mssql-server -n {namespace}') run(f'kubectl cp {prefix_keyfile_name}-privatekey.pem controldb-0:/var/opt/mssql/{prefix_keyfile_name}-privatekey.pem -c mssql-server -n {namespace}') os.chdir(cwd) ###Output _____no_output_____ ###Markdown Get the `controller-db-rw-secret` secretGet the controller SQL symmetric key password for decryption. 
###Code import base64 controller_db_rw_secret = run(f'kubectl get secret/controller-db-rw-secret -n {namespace} -o jsonpath={{.data.encryptionPassword}}', return_output=True) controller_db_rw_secret = base64.b64decode(controller_db_rw_secret).decode('utf-8') print("controller_db_rw_secret retrieved") ###Output _____no_output_____ ###Markdown Update the files table with the certificates through opened SQL connection ###Code import os sql = f""" OPEN SYMMETRIC KEY ControllerDbSymmetricKey DECRYPTION BY PASSWORD = '{controller_db_rw_secret}' DECLARE @FileData VARBINARY(MAX), @Key uniqueidentifier; SELECT @Key = KEY_GUID('ControllerDbSymmetricKey'); SELECT TOP 1 @FileData = doc.BulkColumn FROM OPENROWSET(BULK N'/var/opt/mssql/{prefix_keyfile_name}-certificate.p12', SINGLE_BLOB) AS doc; EXEC [dbo].[sp_set_file_data_encrypted] @FilePath = '/config/scaledsets/control/containers/{container_name}/files/{prefix_keyfile_name}-certificate.p12', @Data = @FileData, @KeyGuid = @Key, @Version = '0'; SELECT TOP 1 @FileData = doc.BulkColumn FROM OPENROWSET(BULK N'/var/opt/mssql/{prefix_keyfile_name}-certificate.pem', SINGLE_BLOB) AS doc; EXEC [dbo].[sp_set_file_data_encrypted] @FilePath = '/config/scaledsets/control/containers/{container_name}/files/{prefix_keyfile_name}-certificate.pem', @Data = @FileData, @KeyGuid = @Key, @Version = '0'; SELECT TOP 1 @FileData = doc.BulkColumn FROM OPENROWSET(BULK N'/var/opt/mssql/{prefix_keyfile_name}-privatekey.pem', SINGLE_BLOB) AS doc; EXEC [dbo].[sp_set_file_data_encrypted] @FilePath = '/config/scaledsets/control/containers/{container_name}/files/{prefix_keyfile_name}-privatekey.pem', @Data = @FileData, @KeyGuid = @Key, @Version = '0'; """ save_file("insert_certificates.sql", sql) cwd = os.getcwd() os.chdir(temp_dir) # Workaround kubectl bug on Windows, can't put c:\ on kubectl cp cmd line run(f'kubectl cp insert_certificates.sql controldb-0:/var/opt/mssql/insert_certificates.sql -c mssql-server -n {namespace}') run(f"""kubectl exec 
controldb-0 -c mssql-server -n {namespace} -- bash -c "SQLCMDPASSWORD=`cat /var/run/secrets/credentials/mssql-sa-password/password` /opt/mssql-tools/bin/sqlcmd -b -U sa -d controller -i /var/opt/mssql/insert_certificates.sql" """) # Cleanup run(f"""kubectl exec controldb-0 -c mssql-server -n {namespace} -- bash -c "rm /var/opt/mssql/insert_certificates.sql" """) run(f"""kubectl exec controldb-0 -c mssql-server -n {namespace} -- bash -c "rm /var/opt/mssql/{prefix_keyfile_name}-certificate.p12" """) run(f"""kubectl exec controldb-0 -c mssql-server -n {namespace} -- bash -c "rm /var/opt/mssql/{prefix_keyfile_name}-certificate.pem" """) run(f"""kubectl exec controldb-0 -c mssql-server -n {namespace} -- bash -c "rm /var/opt/mssql/{prefix_keyfile_name}-privatekey.pem" """) os.chdir(cwd) ###Output _____no_output_____ ###Markdown Clean up certificate staging areaRemove the certificate files generated on disk (they have now beenplaced in the controller database). ###Code cmd = f"rm -r {test_cert_store_root}/{app_name}" run(f'kubectl exec {controller} -c controller -n {namespace} -- bash -c "{cmd}"') ###Output _____no_output_____ ###Markdown Clear out the controller\_db\_rw\_secret variable ###Code controller_db_rw_secret= "" ###Output _____no_output_____ ###Markdown Restart `controller` to pick up new certificates.Delete the controller pod so that it can restart the controller and pickup new certificates. ###Code run(f'kubectl delete pod {controller} -n {namespace}') ###Output _____no_output_____ ###Markdown Restart `master pool` pods to pick up new certificates.All pods that use PolyBase need to be restarted to load the newcertificates. 
###Code pods = run(f'kubectl get pods -n {namespace} --selector role=master-pool --output=jsonpath={{.items[*].metadata.name}}', return_output=True) for pod in pods.split(' '): run(f'kubectl delete pod {pod} -n {namespace}') ###Output _____no_output_____ ###Markdown Restart `compute pool` pods to pick up new certificates.All pods that use PolyBase need to be restarted to load the newcertificates. ###Code pods = run(f'kubectl get pods -n {namespace} --selector role=compute-pool --output=jsonpath={{.items[*].metadata.name}}', return_output=True) for pod in pods.split(' '): run(f'kubectl delete pod {pod} -n {namespace}') ###Output _____no_output_____ ###Markdown Clean up temporary directory for staging configuration files ###Code # Delete the temporary directory used to hold configuration files import shutil shutil.rmtree(temp_dir) print(f'Temporary directory deleted: {temp_dir}') print('Notebook execution complete.') ###Output _____no_output_____
password_condtion.ipynb
###Markdown ###Code import re a=input("enter the strong password") count=0 while True: if (len(a)<8): count=-1 break elif not re.search("[A-Z]",a): count=-1 break elif not re.search("[0-9]",a): count=-1 break elif not re.search("[_@$]",a): count=-1 break elif not re.search("[a-z]",a): count=-1 break elif re.search("\s",a): count=-1 break else: count=0 print("valid password") break if count is not 0: print("not a valid password") a=input("enter the password") b,c,d,e=0,0,0,0 if (len(a)>=8): for i in a: if (i.islower()): b+=1 elif (i.isupper()): c+=1 elif (i.isdigit()): d+=1 elif (i=='@' or i=='_' or i=='$' or i=='%'): e+=1 if (b>=1 and c>=1 and d>=1 and e>=1 and b+c+d+e==len(a)): print("valid pass word\n") else: print("not a valid password\n") print("try adding the conditions\n\n" + "1.at least 8 characters\n"+ "2.one uppercase\n"+ "3.one lower case\n" + "4.one special character\n") ###Output enter the passwordM not a valid password try adding the conditions 1.at least 8 characters 2.one uppercase 3.one lower case 4.one special character
ANN/DNN1.ipynb
###Markdown Deep Learning Version , oversampled ###Code #imports import time start_time = time.time() import numpy as np from matplotlib import pyplot as plt from keras import Sequential from keras.layers import Dense, Dropout, Flatten from keras.metrics import binary_accuracy #from keras.utils import np_utils print("--- %s seconds ---" % (time.time() - start_time)) # import datasets with time taken! #smoll """ # commented out to save computation start_time = time.time() smoll = np.loadtxt("/home/willett/NeutrinoData/small_CNN_input_processed.txt", comments='#') print("--- %s seconds ---" % (time.time() - start_time)) print(smoll.shape) """ #and the full start_time = time.time() fll = np.loadtxt("/home/willett/NeutrinoData/full_CNN_input_processed.txt", comments='#') print("--- %s seconds ---" % (time.time() - start_time)) print(fll.shape) """ # commented out to save computation #and the full start_time = time.time() fll = np.loadtxt("/home/willett/NeutrinoData/test_CNN_input_processed.txt", comments='#') print("--- %s seconds ---" % (time.time() - start_time)) print(fll.shape) """ # extract title pls = open("/home/willett/NeutrinoData/small_CNN_input_processed.txt", "r") title = pls.readline() title = title[2:-1] print(title) # creating a dataset switch, change what UsedData is to change CNN UD = fll # Used Data = <dataset> UDLength = UD.shape[0] print("shape: ",UD.shape,"\nsize: ", UD.size," \nlength: ", UDLength) # dataset is expected in this format: # FirstLayer LastLayer NHits AverageZP Thrust PID_Angle PID_Front PID_LLR_M #FirstLayer LastLayer NHits_Low AverageZP Thrust_Lo PID_Angle PID_Front PID_LLR_M #Energy_As Angle_Bet Distance_Bet Sig Bg # with Sig and Bg expected as one hot vectors. # splitting X = dataset , Y = one hot vectors X = UD[:,0:-2] Y = UD[:,-2:1000] print("X shape: ",X.shape,"\nY shape: ", Y.shape) # they will be split into testing and training at compile # inevitable bias removal... 
by oversampling # using a 50% oversampling ratio, because i want to ! (no citation) #how long? start_time = time.time() SigI = np.where(Y[:,0] == 1)[0] BgI= np.where(Y[:,0] == 0)[0] SigN = SigI.size # how much signal there is BgN = BgI.size # how much background there is Multip = int(BgN/SigN) # how much more signal event copies needed for ~50% print(" signal and background event number: ",SigI.size,BgI.size,"\n number more needed:",Multip) SNratio = (100*SigN)/(SigN + BgN) print("initial Signal to Noise ratio: ",SNratio,"% signal") #im going to reconstruct the arrays of signal events, background events, then add them together and shuffle! XSig = X[SigI] XBg = X[BgI] YSig = Y[SigI] YBg = Y[BgI] #print(XSig.shape,XBg.shape, YSig.shape, YBg.shape) # these are the events of each type. # this is the array of signal repreated (tiled) multip times. YSigM = np.transpose(np.tile(np.transpose(YSig), Multip)) XSigM = np.transpose(np.tile(np.transpose(XSig), Multip)) print( XSigM.shape, YSigM.shape) #adding arrays together and then shuffling: X2 = np.concatenate((XBg,XSigM)) Y2 = np.concatenate((YBg,YSigM)) print(X2.shape, Y2.shape) #shuffling print("these arrays should be different vertically, to ensure shuffle succesful:") print(X2[0:3,0],Y2[0:3,0]) np.random.shuffle(X2) np.random.shuffle(Y2) print(X2[0:3,0],Y2[0:3,0]) #final ratio: NewSigN = YSigM.shape[0] SNRatioNew = (100*YSigM.shape[0]) / (YSigM.shape[0] + BgN) print("final Signal Noise ratio: ",SNRatioNew,"% signal") print("--- %s seconds ---" % (time.time() - start_time)) print(X2.shape) X3 = np.expand_dims(X2, axis=2) print(X3.shape) #neural network architecture: model = Sequential() # set variables: width = 30 #--number on nodes in the layer DR = 0.5 #--fraction of nodes dropped during training AT = "sigmoid" #--activation type for dense layers UB = True #--use bias vectors InDim = (X3.shape[1],X3.shape[2] ) #--input shape of single sample (tuple) #construction: start_time = time.time() # how long does it take? 
# Fully-connected stack: 8 hidden Dense layers of `width` nodes, each
# followed by Dropout(DR) to reduce overfitting.
# NOTE(review): input_shape=(19,1) is hard-coded here although InDim was
# computed above from X3 -- confirm the feature count matches the data.
model.add(Dense(width,activation=AT, use_bias=UB, input_shape=(19,1) )) # input layer and 1
model.add(Dropout(DR))
model.add(Dense(width,activation=AT, use_bias=UB )) # 2
model.add(Dropout(DR))
model.add(Dense(width,activation=AT, use_bias=UB )) # 3
model.add(Dropout(DR))
model.add(Dense(width,activation=AT, use_bias=UB )) # 4
model.add(Dropout(DR))

#Because this one is deep:
model.add(Dense(width,activation=AT, use_bias=UB )) # 3
model.add(Dropout(DR))
model.add(Dense(width,activation=AT, use_bias=UB )) # 4
model.add(Dropout(DR))
model.add(Dense(width,activation=AT, use_bias=UB )) # 3
model.add(Dropout(DR))
model.add(Dense(width,activation=AT, use_bias=UB )) # 4
model.add(Dropout(DR))

model.add(Flatten()) # reduce dimensionality of the input data for output
model.add(Dense(2, activation="softmax", use_bias=UB)) # output layer softmax recommended
#(classification mutually excluive + softmax differentiable for optimizing)
# -> https://www.quora.com/Artificial-Neural-Networks-Why-do-we-use-softmax-function-for-output-layer

print("--- %s seconds ---" % (time.time() - start_time))

# Apply regularizer if overfitting! ^

# binary_crossentropy is the best according to https://www.dlology.com/blog/how-to-choose-last-layer-activation-and-loss-function/
# adam is best for me according to https://towardsdatascience.com/types-of-optimization-algorithms-used-in-neural-networks-and-ways-to-optimize-gradient-95ae5d39529f

# compile model:
start_time = time.time() # how long does it take?
model.compile(optimizer='adagrad',
              loss='binary_crossentropy',
              metrics=['accuracy', 'binary_accuracy' ])
print("--- %s seconds ---" % (time.time() - start_time))

# Train the model, iterating on the data in batches of 32 samples
start_time = time.time() # how long does it take?
history = model.fit(X3, # the (now oversampled) dataset
                    Y2, #true or false values for the dataset
                    epochs=100, #number of iteration over data
                    batch_size=32, #number of trainings between tests
                    verbose=1, #prints one line per epoch of progress bar
                    validation_split=0.1 ) #ratio of test to train
print("--- %s seconds ---" % (time.time() - start_time))

#summarise history for accuracy
# NOTE(review): the 'acc'/'val_acc' history keys are Keras-version
# dependent; newer Keras uses 'accuracy'/'val_accuracy' -- confirm
# against the installed version.
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
###Output
_____no_output_____
2_Curso/Laboratorio/SAGE-noteb/IPYNB/PROBA/118-PROBA-Paseo-2dim.ipynb
###Markdown En este ejercicio estudiamos paseos aleatorios en dos dimensiones. El paseante comienza en el origen $(0,0)$ de $\mathbb{R}^2$ y se mueve por los puntos con coordenadas enteras. En cada momento de tiempo ($t=0,1,2,3,\dots$) elige aleatoriamente uno de sus cuatro puntos (con coordenadas enteras) vecinos y se desplaza a &eacute;l. El problema que queremos estudiar es la determinaci&oacute;n de la probabilidad de que el paseante vuelva en alg&uacute;n momento al origen. El problema que encontramos es que &nbsp;el tiempo que tarda en volver puede ser astron&oacute;micamente grande y usando el ordenador estamos limitados. Para poder tratar este problema debemos fijar un tiempo m&aacute;ximo de espera $T$ ($10^5$ en el ejemplo de m&aacute;s abajo) , y nos dejaremos convencer de que la probabilidad de retorno es $1$ si vemos que va aumentando al dejar crecer $T$ &nbsp;y parece que se aproxima a $1$.&nbsp;Estudiar el caso $3$-dimensional de este mismo problema.Estudiar la variante en la que consideramos dos paseantes aleatorios que comienzan en $t=0 $ en el origen $(0,0)$, y queremos estudiar &nbsp;la probabilidad de que se reecuentren (est&eacute;n en el mismo momento de tiempo en el mismo lugar). ###Code def actualizar(L): x = randint(1,4) if x == 1: L[0] = L[0]+1 return L elif x == 2: L[1] = L[1]+1 return L elif x == 3: L[0] = L[0]-1 return L else: L[1] = L[1]-1 return L def retorno(): cont = 0 Pini = actualizar([0,0]) while (Pini != [0,0] and cont<= 10^5): Pini = actualizar(Pini) cont += 1 #if cont%10000 == 0: #print cont,Pini return cont def probabilidad(N): contador = 0 for muda in xrange(N): cont = retorno() ##print cont if cont != 10^5+1: contador += 1 if muda%100 == 0: print muda return (contador/N).n() probabilidad(10^3) ###Output 0 100 200 300 400 500 600 700 800 900
nobel_physics_prizes/notebooks/4.2-topic-modeling.ipynb
###Markdown Topic ModelingIf you recall, the goals of the unsuccessful [exploratory factor analysis](4.1-exploratory-factor-analysis.ipynb) were to:1. **Reduce the dimensionality of the feature space** to help prevent overfitting when building models.2. **Find a representation of the observed variables in a lower dimensional latent space**. Reducing the variables to **latent factors** helps with interpretability of models.The aim of this notebook is to achieve these goals through a **topic modeling approach**. A [topic model](https://en.wikipedia.org/wiki/Topic_model) is an unsupervised method in [natural language processing](https://en.wikipedia.org/wiki/Natural_language_processing) for discovering **latent topics** in a *corpus* of documents. A topic is essentially a collection of words that statistically co-occur frequently together in documents. So in the topic modeling framework, a document consists of topics and topics are composed of words. It is important to understand that topic modeling is not only restricted to words and can be used for any discrete data. In our case, the discrete data (words) are the binary features and the corpus of documents are the physicists. We will use topic modeling to discover **latent topics**, analogous to the **latent factors** in factor analysis, that underlie the physicists data. The number of topics is specified *a priori* and is expected to correspond to the intrinsic dimensionality of the data. As such it is expected to be much lower than the dimensionality of feature data.[Correlation Explanation](https://www.transacl.org/ojs/index.php/tacl/article/view/1244/275) (**CorEx**) is a discriminative and information-theoretic approach to learning latent topics over documents. It is different from most topic models as it does not assume an underlying generative model for the data. It instead learns maximally informative topics through an information-theoretic framework. 
The CorEx topic model seeks to maximally explain the dependencies of words in documents through latent topics. CorEx does this by maximizing a lower bound on the [total correlation](https://en.wikipedia.org/wiki/Total_correlation) (multivariate [mutual information](https://en.wikipedia.org/wiki/Mutual_information)) of the words and topics.There are many advantages of the CorEx model that make it particularly attractive. The most relevant ones for this study are:- **No generative model is assumed for the data**, which means no validation of assumptions that may or may not be true. The latent topics are learnt entirely from the data. This makes the model extremely flexible and powerful.- The method can be used for any **sparse binary dataset** and its algorithm naturally and efficiently takes advantage of the sparsity in the data.- Binary latent topics are learnt, which leads to **highly interpretable models**. A document can consist of no topics, all topics, or any number of topics in between.- **No tuning of numerous hyperparameters**. There is only one hyperparameter, the *number of topics*, and there is a principled way to choose this.More details on the mathematical and implementation details of the CorEx model can be found in section 2 of [Anchored Correlation Explanation: Topic Modeling with Minimal Domain Knowledge](https://www.transacl.org/ojs/index.php/tacl/article/view/1244/275) by Gallagher et al. We will be using the python implementation [corextopic](https://github.com/gregversteeg/corex_topic) for the topic modeling. 
###Code import matplotlib.pyplot as plt import numpy as np import pandas as pd import scipy.sparse as ss import seaborn as sns from corextopic import corextopic as ct from src.features.features_utils import convert_categoricals_to_numerical from src.data.progress_bar import progress_bar %matplotlib inline ###Output _____no_output_____ ###Markdown Reading in the DataFirst let's read in the training, validation and test features and convert the categorical fields to a numerical form that is suitable for building machine learning models. ###Code train_features = pd.read_csv('../data/processed/train-features.csv') train_features = convert_categoricals_to_numerical(train_features) train_features.head() validation_features = pd.read_csv('../data/processed/validation-features.csv') validation_features = convert_categoricals_to_numerical(validation_features) validation_features.head() test_features = pd.read_csv('../data/processed/test-features.csv') test_features = convert_categoricals_to_numerical(test_features) test_features.head() ###Output _____no_output_____ ###Markdown Model SelectionThere is a principled way for choosing the *number of topics*. Gallagher et al. state that "Since each topic explains a certain portion of the overall total correlation, we may choose the number of topics by observing diminishing returns to the objective. Furthermore, since the CorEx implementation depends on a random initialization (as described shortly), one may restart the CorEx topic model several times and choose the one that explains the most total correlation." Following this suggestion, we have written a function that fits a CorEx topic model over a *number of topics range*. For each *number of topics*, the function fits a specified *number of topic models* and selects the topic model with the highest total correlation (TC). Finally, the topic model with the *number of topics* corresponding to the overall highest TC is chosen (i.e. 
the model that produces topics that are most informative about the documents). This function takes a few minutes to run as it is doing an exhaustive search over a wide range of the number of topics, so feel free to grab a coffee. ###Code def find_best_topic_model(features, num_topic_models=10, num_topics_range=range(1, 11), max_iter=200, eps=1e-05, progress_bar=None): """Find the best topic model as measured by total correlation (TC). Fits a CorEx topic model over a number of topics range. For each number of topics, fits a specified number of topic models and selects the topic model with the highest total correlation (TC), ignoring topic models with empty topics. Finally, the topic model with the value of number of topics corresponding to the overall highest TC is chosen (namely, the model that produces topics that are most informative about the documents). Args: features (pandas.DataFrame): Binary features dataframe. num_topic_models (int, optional): Defaults to 10. Number of topics models to fit for each number of topics. num_topics_range (range, optional): Defaults to range(1, 11). Range of number of topics to fit models over. max_iter (int, optional): Defaults to 200. Maximum number of iterations before ending. eps (float, optional): Defaults to 1e-05. Convergence tolerance. progress_bar (progressbar.ProgressBar, optional): Defaults to None. Progress bar. Returns: corextopic.CorEx: CorEx topic model. CorEx topic model with the highest total correlation. 
""" if progress_bar: progress_bar.start() X = ss.csr_matrix(features.values) high_tc_topic_models = [] for n_topic in num_topics_range: if progress_bar: progress_bar.update(n_topic) topic_models = [] for n_topic_models in range(1, num_topic_models + 1): topic_model = ct.Corex(n_hidden=n_topic, max_iter=max_iter, eps=eps, seed=n_topic_models) topic_model.fit(X, words=features.columns, docs=features.index) if _has_empty_topics(topic_model): # unstable model so ignore continue topic_models.append((topic_model, topic_model.tc)) if not topic_models: continue # for given number of topics, find model with highest total correlation (TC) topic_models.sort(key=lambda x:x[1], reverse=True) high_tc_topic_models.append((topic_models[0][0], topic_models[0][1])) # find overall model with highest total correlation (TC) high_tc_topic_models.sort(key=lambda x:x[1], reverse=True) high_tc_model = high_tc_topic_models[0][0] if progress_bar: progress_bar.finish() return high_tc_model def _has_empty_topics(model): for n_topic in range(model.n_hidden - 1, 0, -1): if not model.get_topics(topic=n_topic): return True return False num_topics_range=range(1, 31) topic_model = find_best_topic_model( train_features, num_topic_models=20, num_topics_range=num_topics_range, progress_bar=progress_bar(len(num_topics_range), banner_text_begin='Running: ', banner_text_end=' topics range')) print('Number of latent factors (topics) = ', topic_model.n_hidden) print('Total correlation = ', round(topic_model.tc, 2)) ###Output _____no_output_____ ###Markdown So the optimal number of topics is 25. Note that we have tuned the `num_topic_models` so that this number is stable. If for instance the `num_topic_models` is reduced to 10, then the value of the optimal number of topics will change due to the random initializations of the CorEx topic model. Let's now observe the distribution of TCs for each topic to see how much each additional topic contributes to the overall TC. 
We should keep adding topics until additional topics do not significantly contribute to the overall TC. ###Code def plot_topics_total_correlation_distribution( topic_model, ylim=(0, 2.5), title='Topics total correlation distribution', xlabel='Topic number'): """Plot the total correlation distribution of a CorEx topic model. Args: topic_model (corextopic.CorEx): CorEx topic model. ylim (tuple of (`int`, `int`), optional): Defaults to (0, 2.5). y limits of the axes. title (str, optional): Defaults to 'Topics total correlation distribution'. Title for axes. xlabel (str, optional):. Defaults to 'Topic number'. x-axis label. """ plt.bar(range(0, topic_model.tcs.shape[0]), topic_model.tcs) plt.xticks(range(topic_model.n_hidden)) plt.ylim(ylim) plt.title(title) plt.xlabel(xlabel) plt.ylabel('Total correlation (nats)') plt.gca().spines['right'].set_visible(False) plt.gca().spines['top'].set_visible(False) plt.tick_params(bottom=False, left=False) plot_topics_total_correlation_distribution(topic_model) ###Output _____no_output_____ ###Markdown Looking at the plot, you can see that this statement is fairly subjective. Should we take 10, 12, 15, 18 or 22 topics? A slightly more principled way would be to look at the cumulative distribution and select the minimum number of topics that explains say 95% of the overall topics total correlation. This is similar to an explained variance cut-off value in principal component analysis. The plot is shown below. ###Code def plot_topics_total_correlation_cumulative_distribution( topic_model, ylim=(0, 17), cutoff=None, title='Topics total correlation cumulative distribution', xlabel='Topic number'): """Plot the total correlation cumulative distribution of a CorEx topic model. Args: topic_model (corextopic.CorEx): CorEx topic model. ylim (tuple of (`int`, `int`), optional): Defaults to (0, 2.5). y limits of the axes. cutoff (float, optional). Defaults to None. `If float, then 0 < cutoff < 1.0. 
The fraction of the cumulative total correlation to use as a cutoff. A horizontal dashed line will be drawn to indicate this value. title (str, optional): Defaults to 'Topics total correlation cumulative distribution'. Title for axes. xlabel (str, optional): Defaults to 'Topic number'. x-axis label. """ plt.bar(range(0, topic_model.tcs.shape[0]), np.cumsum(topic_model.tcs)) if cutoff: plt.axhline(cutoff * topic_model.tc, linestyle='--', color='r') plt.xticks(range(topic_model.n_hidden)) plt.ylim(ylim) plt.title(title) plt.xlabel(xlabel) plt.ylabel('Total correlation (nats)') plt.gca().spines['right'].set_visible(False) plt.gca().spines['top'].set_visible(False) plt.tick_params(bottom=False, left=False) plot_topics_total_correlation_cumulative_distribution(topic_model, cutoff=0.95) ###Output _____no_output_____ ###Markdown Using this criteria suggests that 18 topics would be appropriate. However, again this is fairly subjective. Should we choose a cut-off of 90%, 95% or 99%? All of these different values would change the conclusion of the number of topics to retain. As there are so few topics anyway, it makes more sense to retain all 25 topics and not lose any further information. You will also see shortly that there is some interesting information in the tail of the topics. TopicsNow we will take a look at the produced topics, in descending order of the total correlation they explain, to see how coherent they are. The features in topics are ranked in descending order of their [mutual information](https://en.wikipedia.org/wiki/Mutual_information) with the topic. So features with higher values of mutual information are more associated with the topic than features with lower values. Do not be alarmed by the negative values of mutual information. As Gallagher explains in the [notebook example](https://github.com/gregversteeg/corex_topic/blob/master/corextopic/example/corex_topic_example.ipynb), "Theoretically, mutual information is always positive. 
If the CorEx output returns a negative mutual information from `get_topics()`, then the absolute value of that quantity is the mutual information between the topic and the absence of that word." We add labels to the topics to aid with their interpretability. ###Code latent_factors = {'is_eu_worker':'European Workers', 'is_eu_alumni':'European Alumni', 'is_alumni':'Alumni', 'is_na_eu_resident':'North American and European Residents', 'is_na_citizen':'North American Citizens', 'is_na_worker':'North American Workers', 'is_as_citizen':'Asian Citizens', 'is_na_alumni':'North American Alumni', 'is_gbr_citizen':'British Citizens', 'is_rus_citizen':'Russian Citizens', 'is_deu_citizen':'German Citizens', 'is_nld_ita_che_citizen':'Netherlands, Italian and Swiss Citizens', 'is_studyholic':'Studyholics', 'is_workhorse':'Workhorses', 'is_aut_citizen':'Austrian Citizens', 'is_eu_citizen':'European Citizens', 'is_gbr_worker':'British Workers', 'is_passport_collector':'Passport Collectors', 'is_born':'Born', 'is_fra_citizen':'French Citizens', 'is_other_citizen':'Other Citizens', 'is_emigrant':'Emigrants', 'is_physics_laureate_teacher':'Physics Laureate Teachers', 'is_physics_laureate_student':'Physics Laureate Students', 'is_astronomer':'Astronomers' } def plot_topics(topic_model, topic_labels=None, max_features_per_topic=15, xlim=(-0.5, 1), ylabel='Feature', figsize=None, plotting_context='notebook'): """Plot the topics of a CorEx topic model. Args: topic_model (corextopic.CorEx): CorEx topic model. topic_labels (list of `str`, optional): Defaults to None. Topic labels for each axis. max_features_per_topic (int, optional): Maximum number of features to plot per topic. xlim (tuple of (`int`, `int`), optional): Defaults to (-0.5, 1). x limits of the axes. ylabel (str, optional): Defaults to 'Feature'. y-axis label. figsize (tuple of (`int`, `int`), optional): Defaults to None. Figure size in inches x inches. plotting_context (str, optional): Defaults to `notebook`. 
Seaborn plotting context. """ with sns.plotting_context(plotting_context): fig, ax = plt.subplots(nrows=topic_model.n_hidden, ncols=1, sharex=False, figsize=figsize) plt.subplots_adjust(hspace=200) for n_topic in range(topic_model.n_hidden): topic = topic_model.get_topics(n_words=max_features_per_topic, topic=n_topic) labels = [label[0] for label in topic] mutual_info = [mi[1] for mi in topic] ax[n_topic].barh(labels, mutual_info) ax[n_topic].set_xlim(xlim) ax[n_topic].set_ylim(-0.5, max_features_per_topic - 0.5) if topic_labels: title = topic_labels[n_topic] else: title = 'topic_' + str(n_topic) ax[n_topic].set(title=title, xlabel='Mutual information (nats)', ylabel=ylabel) fig.tight_layout() plot_topics(topic_model, topic_labels=list(latent_factors.values()), figsize=(20, 280), plotting_context='talk') ###Output _____no_output_____ ###Markdown As you can see, the topic labels are self-explanatory and correspond mainly with the dominant features of each topic, as measured by the mutual information. As explained before, the features with very low mutual information are not really informative about the topic. The fact we could put a name to every topic shows just how discriminative the topic modeling is. It's impressive how coherent some of the topics are. The *North American Workers*, *North American Alumni*, *Workhorses*, *Studyholics* and *French Citizens* topics are exemplary examples of such topics. The *Born* topic is definitely the least coherent topic and maybe suggests that the features in this topic were probably not so useful to begin with. Top Documents Per TopicAs with the topic features, the most probable documents (physicists) per topic can also be easily accessed, and it is interesting to take a look at a few of these. 
As Gallagher says, they "are sorted according to log probabilities which is why the highest probability documents have a score of 0 ($e^0 = 1$) and other documents have negative scores (for example, $e^{-0.5} \approx 0.6$)."OK let's take a look at the top physicists in the *European Workers* (topic 0), *Workhorses* (topic 13) and *Physics Laureate Teachers* (topic 22). ###Code topic_model.get_top_docs(n_docs=30, topic=0, sort_by='log_prob') ###Output _____no_output_____ ###Markdown The names here seem reasonable as physicists who have worked in Europe. But as you can see from the probabilities, a lot of the physicists have a similar mutual information with this topic. It's a different story if we use the TC instead. This is more discriminative, but from the warning message you can see that Gallagher does not yet recommend this. ###Code topic_model.get_top_docs(n_docs=30, topic=0, sort_by='tc') ###Output _____no_output_____ ###Markdown Below we see the real workhorses of physics. The probabilites here seem to discriminate the physicists a lot better. If you examine the Wikipedia Infobox `Institutions` field of some of these physicists, you will see the breadth of workplaces corroborates this list. ###Code topic_model.get_top_docs(n_docs=30, topic=13, sort_by='log_prob') ###Output _____no_output_____ ###Markdown Below we see the great teachers and influencers of physics laureates, many of whom are laureates themselves. Likewise, if you take a look at the Wikipedia Infobox `Doctoral students` and `Other notable students` fields of some of these physicists, you will see the number of laureates they have had an impact on. Interestingly, the first paragraph of [Arnold Sommerfeld's Wikipedia article](https://en.wikipedia.org/wiki/Arnold_Sommerfeld) focuses on this aspect of his career and compares him to *J. J. Thomson*. 
###Code topic_model.get_top_docs(n_docs=30, topic=22, sort_by='log_prob') ###Output _____no_output_____ ###Markdown Projecting Features to the Topic SpaceCorEx is a discriminative model which means that it estimates the probability a document (i.e. physicist) belongs to a topic given that document's words (i.e. features). The estimated probabilities of topics for each document can be obtained through the topic model's properties `log_p_y_given_x` or `p_y_given_x` or function `predict_proba`. A binary determination of which documents belong to each topic is obtained using a softmax and can be accessed through the topic model's `labels` property or function `transform` (or `predict`). We will now use the latter to reduce the dimensionality of the original binary features by projecting them into the latent space spanned by the binary topics of the topic model. ###Code def project_features_to_topic_space(features, topic_model, columns=None): """Project the binary features to the latent space spanned by the binary topics of the topic model. Args: features (pandas.DataFrame): Binary features dataframe. topic_model (corextopic.CorEx): CorEx topic model. topic_labels (list of `str`, optional): Defaults to None. Topic labels to use as columns for the dataframe. Returns: pandas.DataFrame: Binary features dataframe containing the topics. 
""" X = ss.csr_matrix(features.values) X_topics = topic_model.transform(X) features_topics = pd.DataFrame(X_topics, index=features.index, columns=columns) features_topics = features_topics.applymap(lambda x: 'yes' if x == True else 'no') return features_topics train_features_topics = project_features_to_topic_space( train_features, topic_model, list(latent_factors.keys())) train_features_topics.head() validation_features_topics = project_features_to_topic_space( validation_features, topic_model, list(latent_factors.keys())) validation_features_topics.head() test_features_topics = project_features_to_topic_space( test_features, topic_model, list(latent_factors.keys())) test_features_topics.head() ###Output _____no_output_____ ###Markdown You may be wondering why we did not just use the estimated probabilities as the reduced dimension features. Most likely a model built from those features would be more accurate than one built from the binary features. Interpretability is the answer. For example, it does not make much sense to talk about the probability of a physicist being a *European Worker* or not. S/he is either a *European Worker* or not. It is more natural to say, for instance, that a physicist is a Nobel Laureate because s/he is a *European Worker*, a *North American Citizen* and a *Physics Laureate Teacher*, etc.The *European Alumni* and *Astronomer* topics are interesting as they both consist of only one feature. Therefore, you would expect a one-to-one correspondence between the labels in the topic and the label in the original feature. However, this is not always the case as the topic has actually "flipped" the label for some of the physicists. We are not exactly sure why it happens. Clearly it is a quirk of the topic modeling. 
###Code len(train_features) - (train_features_topics.is_eu_alumni == train_features.alumnus_in_EU.map( {1: 'yes', 0:'no'})).sum() len(train_features) - (train_features_topics.is_astronomer == train_features.is_astronomer.map( {1: 'yes', 0:'no'})).sum() ###Output _____no_output_____ ###Markdown Persisting the DataNow we have the training, validation and test features dataframes in the topic model space, we will persist them for future use. ###Code train_features_topics.to_csv('../data/processed/train-features-topics.csv') validation_features_topics.to_csv('../data/processed/validation-features-topics.csv') test_features_topics.to_csv('../data/processed/test-features-topics.csv') ###Output _____no_output_____
Big_data_Assignment_6.ipynb
###Markdown Initial SparkSession ###Code spark = SparkSession.builder \ .master("local") \ .appName("Assignment 6") \ .config("spark.some.config.option", "some-value") \ .getOrCreate() import os os.getcwd() ###Output _____no_output_____ ###Markdown Reading dataset ###Code #laod data as RDD sc=spark.sparkContext rdd_d=sc.textFile('/content/sample_data/california_housing_test.csv') rdd_d.take(10) #load the data as dataframe data=spark.read.csv('/content/sample_data/california_housing_test.csv',header=True) data.show(10) #remove the first row f=rdd_d.first() rdd_d=rdd_d.filter(lambda r:r!=f) ## convert rdd to dataframe a=rdd_d.map(lambda x:x.split(",")) b=a.map(lambda p: Row(longitude=p[0],latitude=p[1],housing_median_age=p[2],total_rooms=p[3],total_bedrooms=p[4],population=p[5],households=p[6],median_income=p[7],median_house_value=p[8])) rdd_df=spark.createDataFrame(b) rdd_df.show(10) # convert dataframe to rdd rdd1=rdd_df.rdd rdd1 # convert spark dataframe to pandas dataframe pandas=data.toPandas() pandas ###Output _____no_output_____ ###Markdown Task ###Code #Task 1 ---Select first 10 rows of dataset rdd_df.show(10) #Task 2---show the schema of the dataset rdd_df.printSchema() # Task 3---Group by and get max, min, count of a column in the dataset rdd_df.groupBy('households').count().show() rdd_df.select(max('population'),min('population')).show() #Task 4---Filter your dataset by some conditions based on your column rdd_df.filter(rdd_df['population']>500).show() #Task 5---Apply group by with having clause rdd_df.groupBy('median_income').agg(sum('households')).alias('sum_households').show() #Task 6 ---Apply order by rdd_df.select('*').orderBy('households').show() #Task 7---Select distinct records by a column rdd_df.select('housing_median_age').distinct().show() #Task 8---Transform the data type of columns from int to string from pyspark.sql.types import * new_rdd_df=rdd_df.withColumn('households',rdd_df['households'].cast(StringType())) new_rdd_df.printSchema() 
###Output _____no_output_____
notebooks/LazyGreedy.ipynb
###Markdown Lazy versus greedy evaluation ###Code %matplotlib inline %run notebook_setup.py import starry starry.config.quiet = True ###Output _____no_output_____ ###Markdown tl;drVersion `1.0` of the code evaluates things *lazily* by default, meaning that all internal values are nodes in a graph, stored as `theano` tensors. Lazy mode is required for interfacing with `pymc3` to do inference (refer to the several tutorials on `pymc3` sampling). If you *really* need the value of a `theano` object, you can always call its `eval()` method, but keep in mind that operation can be somewhat slow.If, on the other hand, you're not interested in using `pymc3` or in any of the derivatives of `starry` models, you can disable lazy evaluation by typing```pythonstarry.config.lazy = False```at the top of your script, *before* you instantiate any `starry` maps. If you do that, `starry` will behave as it did in previous versions: you don't have to call the `eval()` method or worry about any tensor nonsense. Lazy mode One of the big changes in version `1.0` of `starry` is *lazy evaluation* mode, which is now the default. [Lazy evaluation](https://en.wikipedia.org/wiki/Lazy_evaluation) means that the evaluation of all expressions in the code is delayed until a numerical value is needed (i.e., when outputting or plotting the result). This is as opposed to [greedy or eager evaluation](https://en.wikipedia.org/wiki/Eager_evaluation), in which all expressions are evaluated on-the-fly, as soon as the code encounters them. In lazy evaluation mode, expressions are compiled and stored in memory as *nodes in a graph*, which are only executed when a numerical value is required. This strategy allows for some cool compile-time optimization under the hood. But by far the greatest advantage of lazy evaluation (at least in our case) is that it makes it easy to autodifferentiate expressions using backpropagation. 
This lets us compute derivatives of all expressions extremely efficiently, and those can be seamlessly integrated into derivative-based MCMC sampling schemes such as Hamiltonian Monte Carlo or NUTS.Version `1.0` of `starry` is built on top of the [theano](https://github.com/Theano/Theano) machine learning library, which handles all of the graph compiling and backpropagation. There's lots of other software that does similar things (such as `tensorflow` and `pytorch`), but the advantage of `theano` is that it is also the backbone of [exoplanet](https://github.com/dfm/exoplanet) and [pymc3](https://github.com/pymc-devs/pymc3). This allows us to easily integrate `starry` with all the cool inference machinery of those two packages.Let's look at some examples of how lazy evaluation works in `starry`. Let's instantiate a regular `starry` map: ###Code import starry map = starry.Map(ydeg=1) ###Output _____no_output_____ ###Markdown We can give this map a simple dipole by assigning a value to the coefficient of the $Y_{1,0}$ spherical harmonic: ###Code map[1, 0] = 0.5 ###Output _____no_output_____ ###Markdown Since the coefficient of the $Y_{0,0}$ harmonic is fixed at unity, our spherical harmonic coefficients are now the vector $y = (1, 0, \frac{1}{2}, 0)$. Here's what that looks like: ###Code map.show() ###Output _____no_output_____ ###Markdown Recall that the spherical harmonic coefficients are stored in the `y` attribute of the map. Let's take a look: ###Code map.y ###Output _____no_output_____ ###Markdown That doesn't look right, but it *is*: the vector $y$ is stored internally as a `theano` tensor and doesn't yet have a numerical value: ###Code type(map.y) ###Output _____no_output_____ ###Markdown In order to access its value, I can call its `eval` method: ###Code map.y.eval() ###Output _____no_output_____ ###Markdown Which is what we expected. 
A similar thing happens when we call a method such as `flux`: ###Code map.flux(xo=0.4, yo=0.3, ro=0.1, theta=30) map.flux(xo=0.4, yo=0.3, ro=0.1, theta=30).eval() ###Output _____no_output_____ ###Markdown As we mentioned above, it's not generally a good idea to call the `eval()` method, since it can be quite slow. The whole point of lazy evaluation mode is so that `starry` can be easily integrated with `pymc3`. When building a `pymc3` model, `pymc3` handles all of the evaluations internally, so there's no need to call `eval()`. Within a `pymc3` model context, users can pass `pymc3` variables, `theano` variables, and/or `numpy` arrays to any `starry` method; casting is handled internally in all cases. Check out the tutorials on inference with `pymc3` for more information. If, on the other hand, you're not planning on integrating `starry` with `pymc3`, you should probably run it in greedy mode. See below. Greedy mode To run `starry` in greedy (i.e., not lazy) mode, you can add the following line somewhere near the top of your script: ###Code # *-*-*- DON'T DO THIS AT HOME! -*-*-* # You shouldn't mix greedy and lazy maps in # the same session, as you risk angering theano. # I'm able to get away with it in this example # because I'm just evaluating a few variables. # But if I were to try to do anything else, things # would probably break! starry.config._allow_changes = True starry.config.lazy = False ###Output _____no_output_____ ###Markdown (Note that if you try to change the evaluation mode after you've instantiated a `starry` map, the code will complain.)In greedy mode, things behave as they did in previous versions of the code. Check it out: ###Code map = starry.Map(ydeg=1) map[1, 0] = 0.5 map.y type(map.y) ###Output _____no_output_____ ###Markdown All methods are automatically compiled and return numerical outputs: ###Code map.flux(xo=0.4, yo=0.3, ro=0.1, theta=30) ###Output _____no_output_____
MLCourse/Outliers.ipynb
###Markdown Dealing with Outliers Sometimes outliers can mess up an analysis; you usually don't want a handful of data points to skew the overall results. Let's revisit our example of income data, with some random billionaire thrown in: ###Code %matplotlib inline import numpy as np incomes = np.random.normal(27000, 15000, 10000) incomes = np.append(incomes, [1000000000]) import matplotlib.pyplot as plt plt.hist(incomes, 50) plt.show() ###Output _____no_output_____ ###Markdown That's not very helpful to look at. One billionaire ended up squeezing everybody else into a single line in my histogram. Plus it skewed my mean income significantly: ###Code incomes.mean() ###Output _____no_output_____ ###Markdown It's important to dig into what is causing your outliers, and understand where they are coming from. You also need to think about whether removing them is a valid thing to do, given the spirit of what it is you're trying to analyze. If I know I want to understand more about the incomes of "typical Americans", filtering out billionaires seems like a legitimate thing to do.Here's something a little more robust than filtering out billionaires - it filters out anything beyond two standard deviations of the median value in the data set: ###Code def reject_outliers(data): u = np.median(data) s = np.std(data) filtered = [e for e in data if (u - 2 * s < e < u + 2 * s)] return filtered filtered = reject_outliers(incomes) plt.hist(filtered, 50) plt.show() ###Output _____no_output_____ ###Markdown That looks better. And, our mean is more, well, meangingful now as well: ###Code np.mean(filtered) ###Output _____no_output_____
RNN_predict.ipynb
###Markdown For short time, it works well, but for long time it is not good enough.... ###Code from __future__ import division, print_function, unicode_literals # Common imports import numpy as np import os import tensorflow as tf import pandas as pd tf.__version__ import matplotlib import matplotlib.pyplot as plt def reset_graph(seed=42): tf.reset_default_graph() tf.set_random_seed(seed) np.random.seed(seed) # data train_dataset=pd.read_csv('Time_series_train',names='F') test_dataset=pd.read_csv('Time_series_vali',names='F') train_dataset=pd.DataFrame.to_numpy(train_dataset) test_dataset=pd.DataFrame.to_numpy(test_dataset) train_dataset # prepare batch t_min, t_max=0, 600 resolution=0.1 #batch_size=50 # #n_steps=20 # def next_batch(batch_size,n_steps,dataset): sample_list=range(1,len(dataset)-n_steps) from random import sample train_sample=np.array(sample(sample_list,batch_size)).reshape(batch_size,1) index=train_sample+np.arange(0, n_steps + 1) ys=dataset[index] return ys[:, :-1].reshape(-1, n_steps, 1), ys[:, 1:].reshape(-1, n_steps, 1) n_steps=20 t_instance = np.linspace(0, 0 + resolution * (n_steps + 1), n_steps + 1) x_batch,y_batch=next_batch(1,n_steps,train_dataset) t_instance = np.linspace(0, 0 + resolution * (n_steps + 1), n_steps + 1) plt.title("Testing the model", fontsize=14) plt.plot(t_instance[:-1], x_batch.reshape(n_steps), "bo", markersize=10, label="X_batch") plt.plot(t_instance[1:], y_batch.reshape(n_steps), "r*", markersize=10, label="y_batch") plt.legend(loc="upper left") plt.xlabel("Time") plt.show() t_min, t_max = 0, 300 resolution = 0.1 def time_series(t): return t * np.sin(t) / 3 + 2 * np.sin(t*5) def next_batch(batch_size, n_steps,train): t0 = np.random.rand(batch_size, 1) * (t_max - t_min - n_steps * resolution) Ts = t0 + np.arange(0., n_steps + 1) * resolution ys = time_series(Ts) return ys[:, :-1].reshape(-1, n_steps, 1), ys[:, 1:].reshape(-1, n_steps, 1) X_batch,y_batch=next_batch(1,20,train_dataset) np.c_[X_batch, y_batch] 
reset_graph()

# Single-layer RNN: 200 ReLU units, sequence-to-sequence regression.
n_steps = 20
n_inputs = 1
n_neurons = 200
n_outputs = 1

X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])

cell = tf.contrib.rnn.OutputProjectionWrapper(
    tf.nn.rnn_cell.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu),
    output_size=n_outputs)
outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)

#%%
learning_rate = 0.001

loss = tf.reduce_mean(tf.square(outputs - y))  # MSE
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)

init = tf.global_variables_initializer()

#%%
saver = tf.train.Saver()

#%%
n_iterations = 2000
batch_size = 30

with tf.Session() as sess:
    init.run()
    for step in range(n_iterations):
        X_batch, y_batch = next_batch(batch_size, n_steps, train_dataset)
        sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        if step % 100 == 0:
            mse = loss.eval(feed_dict={X: X_batch, y: y_batch})
            print(step, "\tMSE:", mse)
    saver.save(sess, "./my_time_series_model")  # not shown in the book

#%%
t_instance = np.linspace(0, 0 + resolution * (n_steps + 1), n_steps + 1)
X_new, y_new = next_batch(1, n_steps, test_dataset)
X_new

with tf.Session() as sess:                         # not shown in the book
    saver.restore(sess, "./my_time_series_model")  # not shown
    y_pred = sess.run(outputs, feed_dict={X: X_new})

#%%
y_pred.reshape(n_steps)

plt.title("Testing the model", fontsize=14)
plt.plot(t_instance[:-1], X_new.reshape(n_steps), "bo", markersize=10, label="instance")
plt.plot(t_instance[1:], y_new.reshape(n_steps), "r*", markersize=10, label="target")
plt.plot(t_instance[1:], y_pred[0, :, 0], "g.", markersize=10, label="prediction")
plt.legend(loc="upper left")
plt.xlabel("Time")
plt.show()
###Output _____no_output_____ ###Markdown **Multi layer LSTM cell** ###Code
n_steps = 50
n_inputs = 1
n_neurons = 250
n_outputs = 1
n_layers = 4
n_iterations = 1000
learning_rate = 0.001

X_batch, y_batch = next_batch(1, n_steps, train_dataset)

reset_graph()

lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=n_neurons)  #!
keep_prob = tf.placeholder_with_default(1.0, shape=())

X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_inputs])

# Four stacked LSTM layers, unrolled dynamically over the sequence.
lstm_cells = [tf.nn.rnn_cell.BasicLSTMCell(num_units=n_neurons)
              for _ in range(n_layers)]
multi_cell = tf.nn.rnn_cell.MultiRNNCell(lstm_cells)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_cell, X, dtype=tf.float32)

learning_rate = 0.001

# Project every time step of the top LSTM layer down to a single output.
stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])
stacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)
outputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])

loss = tf.reduce_mean(tf.square(outputs - y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)

init = tf.global_variables_initializer()
saver = tf.train.Saver()

n_iterations = 1000
batch_size = 20
n_epochs = 5

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for step in range(n_iterations):
            X_batch, y_batch = next_batch(batch_size, n_steps, train_dataset)
            _, mse = sess.run([training_op, loss],
                              feed_dict={X: X_batch, y: y_batch})
            if step % 100 == 0:                    # not shown in the book
                print(step, "Training MSE:", mse)  # not shown
    saver.save(sess, "./my_dropout_time_series_model")

t_instance = np.linspace(0, 0 + resolution * (n_steps + 1), n_steps + 1)
X_new, y_new = next_batch(1, n_steps, test_dataset)
X_new

with tf.Session() as sess:                                 # not shown in the book
    saver.restore(sess, "./my_dropout_time_series_model")  # not shown
    y_pred = sess.run(outputs, feed_dict={X: X_new})

plt.title("Testing the model", fontsize=14)
plt.plot(t_instance[:-1], X_new.reshape(n_steps), "bo", markersize=10, label="instance")
plt.plot(t_instance[1:], y_new.reshape(n_steps), "y*", markersize=10, label="target")
plt.plot(t_instance[1:], y_pred[0, :, 0], "r.", markersize=10, label="prediction")
plt.legend(loc="upper left")
plt.xlabel("Time")
plt.show()

number = 20
# Autoregressive rollout: seed with the first n_steps true values, then
# repeatedly feed the last n_steps points back into the model and append its
# one-step-ahead prediction `number` times.
sequence = train_dataset[0:n_steps].reshape(n_steps)
sequence = sequence.tolist()

with tf.Session() as sess:
    saver.restore(sess, "./my_dropout_time_series_model")
    for iteration in range(number):
        X_batch = np.array(sequence[-n_steps:]).reshape(1, n_steps, 1)
        y_pred = sess.run(outputs, feed_dict={X: X_batch})
        # last time step of the prediction is the next point of the series
        sequence.append(y_pred[0, -1, 0])

X_batch = np.array(sequence[-n_steps:]).reshape(1, n_steps, 1)
X_batch

plt.title("Prediction", fontsize=14)
# seed window (true values used to start the rollout)
plt.plot(np.array(sequence[0:n_steps]).reshape(n_steps), "ro", markersize=10, label="start")
# BUG FIX: the legend labels were swapped -- `sequence` holds the model's
# rollout (the prediction) while `train_dataset` holds the ground truth.
plt.plot(np.array(sequence).reshape(number + n_steps), "b.", markersize=10, label="prediction")
plt.plot(train_dataset[0:number + n_steps].reshape(number + n_steps), "y*", markersize=10, label="true")
plt.legend(loc="upper left")
sequence
###Output _____no_output_____
Assignments/GEOS518-Assignment1-AutocorrelationFunction.ipynb
###Markdown Jupyter Notebook Assignment Number 1 Due: 07 February 2018**Instructions:** In this Jupyter notebook, you should perform the following tasks with your chosen dataset:1. Import it and create a plot2. Compute and show key descriptive statistics about the time series that might include the mean, variance, and/or histograms3. Assess the stationarity of the time series in one or both of the following ways: (1) compute and report the slope of a regression line through time (and its significance), (2) compute and report the [Augmented Dickey-Fuller test](https://machinelearningmastery.com/time-series-data-stationary-python/)4. Compute and plot the autocorrelation function of the time series. Note that if your examination reveals that the time series likely has a significant trend through time, you should either attempt to detrend it, or discuss the ramifications of not doing so at length.5. Discuss in detail the results of your analyses in terms of the physical reasons that you observe the trends you see. For example, can you explain why the autocorrelation function behaves as you observe it to based on physical intuition?Characteristics of exemplary work:* You use available libraries and (in comments) justify their use* Your code is well commented and you describe each step that you are doing in your code* Your plots are adequately sized. Axes, plots and legends labeled with font sizes that are readable. The marker and line styles and sizes are appropriate* You use Markdown cells to describe, in detail what each code cell is doing. Markdown cells and code cells are adequately organized* Your notebook response contains a Markdown cell that provides an overview of the problem statement, your approach, and key findings (i.e., these three things should correspond to sections) ###Code # Libraries that might be helpful.
Note, ignore any errors regarding pandas.core.datatools being deprecated import pandas as pd import numpy as np import matplotlib.pyplot as plt import statsmodels.tsa.api as sm ###Output _____no_output_____
src/lab3_nvp_v2.ipynb
###Markdown Lab exercise: Real NVP ###Code import numpy as np import scipy.stats import matplotlib.pyplot as plt import itertools import random import math import time import torch import torch.nn as nn import torch.nn.functional as F import torch.autograd as autograd %matplotlib inline pltparams = { 'legend.fontsize': 'x-large', 'axes.labelsize': 'x-large', 'axes.titlesize': 'x-large', 'xtick.labelsize': 'x-large', 'ytick.labelsize': 'x-large', 'figure.titlesize': 'x-large', 'savefig.dpi': 300, } plt.rcParams.update(pltparams) # sns.set(font_scale = 1.2) # samples1-2 shape must be (n samples, 2) def plot_samples(samples1, samples2=None): fig, ax = plt.subplots() ax.scatter(samples1[:,0], samples1[:,1], marker="x", color="blue") if samples2 is not None: ax.scatter(samples2[:,0], samples2[:,1], marker="x", color="red") return fig import sklearn.datasets target_samples, target_classes = sklearn.datasets.make_moons(1000, noise=0.1) target_samples = torch.from_numpy(target_samples).float() fig = plot_samples(target_samples) ###Output _____no_output_____ ###Markdown Foward from z to x\begin{align*}g = z * exp(s(z * mask)) + t (z * mask)\\x = z * mask + g * (1 - mask)\end{align*}Inverse from x to z\begin{align*}g^{-1} = (x - t(x * mask)) * exp(-s(x * mask))\\z = x * mask + g^{-1} * (1 - mask)\end{align*} ###Code class RealNVPLayer(nn.Module): def __init__(self, size, reverse=False): super().__init__() self.mask = torch.zeros(size, requires_grad=False) mid = int(size / 2) self.mid = mid if reverse: self.mask[mid:] = 1. else: self.mask[:mid] = 1. ## the two operations self.scale = nn.Sequential( nn.Linear(size, 10), nn.Tanh(), nn.Linear(10, size), ) self.translate = nn.Sequential( nn.Linear(size, 10), nn.Tanh(), nn.Linear(10, size), ) # project from the latent space to the observed space, # i.e. x = g(z) def forward(self, z): n_mask = 1. 
- self.mask z_masked = z * self.mask transform = z * torch.exp(self.scale(z_masked)) + self.translate(z_masked) x = z_masked + transform * n_mask return x # project from the observed space to the latent space, # this function also return the log det jacobian of this inv function def inv(self, x): n_mask = 1. - self.mask x_masked = x * self.mask scaled = self.scale(x_masked) reversetransform = (x - self.translate(x_masked)) * torch.exp(-scaled) z = x_masked + reversetransform * n_mask log_det_jacobian = torch.sum(-scaled * n_mask, dim=-1) return z, log_det_jacobian # Test! layer = RealNVPLayer(2, reverse=False) with torch.no_grad(): x = torch.rand(1, 2) z, _ = layer.inv(x) xx = layer(z) print("In the 3 vectors below, the first element must be equal") print("This two vectors should be equal:") print(x) print(xx) print("This vector should be different to the two above") print(z) print() layer = RealNVPLayer(2, reverse=True) with torch.no_grad(): x = torch.rand(1, 2) z, _ = layer.inv(x) xx = layer(z) print("In the 3 vectors below, the second element must be equal") print("This two vectors should be equal:") print(x) print(xx) print("This vector should be different to the two above") print(z) class RealNVP(nn.Module): def __init__(self, size, n_layers): super().__init__() self.prior = torch.distributions.normal.Normal(torch.zeros(2), torch.ones(2)) self.layers = nn.ModuleList( RealNVPLayer(size, i % 2 == 0) for i in range(n_layers) ) def forward(self, z): x = z for i in range(len(self.layers)): x = self.layers[i](x) return x def inv(self, x): log_det_jacobian = 0. z = x for i in reversed(range(len(self.layers))): z, j = self.layers[i].inv(z) # remember here, we just have to sum all log det jacobians! 
log_det_jacobian = log_det_jacobian + j return z, log_det_jacobian def sample(self, n_samples): z = self.prior.sample((n_samples,)) x = self(z) return x def log_prior(self, x): z, det = self.inv(x) ret = self.prior.log_prob(z).sum(1) + det return ret trained_distrib = RealNVP(2, 50) optimizer = torch.optim.Adam(trained_distrib.parameters(), lr=1e-3) batch_size = 1000 losses = list() for _ in range(500): for i in range(0, target_samples.shape[0], batch_size): batch = target_samples[i:i+batch_size] optimizer.zero_grad() loss = -trained_distrib.log_prior(batch).mean() losses.append(loss.item()) loss.backward() torch.nn.utils.clip_grad_norm_(trained_distrib.parameters(), 5) optimizer.step() plt.plot(np.arange(len(losses)), losses) # sample from the model with torch.no_grad(): samples = trained_distrib.sample(1000) fig = plot_samples(target_samples, samples) fig.savefig("sample.pdf") # print the latent space corresponding to each half moon in a different color with torch.no_grad(): source_sample1, _ = trained_distrib.inv(target_samples[target_classes == 0]) source_sample2, _ = trained_distrib.inv(target_samples[target_classes == 1]) fig = plot_samples(source_sample1, source_sample2) fig.savefig("latent.pdf") ###Output _____no_output_____
Fairoza/Module-03/Dictionaries.ipynb
###Markdown Solution for Dictionaries Exercises 1. Create a dictionary which contains 'english spelling' for keys and 'number' in integer as values from 1 to 10 in random orders. For example, one: 1, two: 2. Set the dictionary as variable 'numbers'. ###Code
numbers = {'one': 1, 'three': 3, 'seven': 7, 'two': 2, 'four': 4, 'eight': 8, 'ten': 10, 'five': 5, 'six': 6, 'nine': 9}
print(numbers)
###Output {'one': 1, 'three': 3, 'seven': 7, 'two': 2, 'four': 4, 'eight': 8, 'ten': 10, 'five': 5, 'six': 6, 'nine': 9} ###Markdown 2. What is the second key of *numbers*? ###Code
# BUG FIX: numbers[1] raised KeyError -- the dict is keyed by strings, not
# positions. Dicts preserve insertion order (Python 3.7+), so the second key
# is the second element of list(numbers).
print(list(numbers)[1])
numbers["three"]
###Output _____no_output_____ ###Markdown 3. Find the number of unique keys in *numbers*. ###Code
# Duplicate key literals are collapsed by the dict itself ('one' appears twice
# below but is stored once), so len() already counts unique keys.
numbers_2 = {'one': 1, 'one': 1, 'three': 3, 'seven': 7, 'two': 2, 'four': 4, 'eight': 8, 'ten': 10, 'five': 5, 'six': 6, 'nine': 9}
print(numbers)
len(numbers_2)
num_keys_2 = len(set(numbers_2))
print(num_keys_2)
###Output _____no_output_____ ###Markdown 4. Find whether 'eleven' is a key in *numbers*. ###Code
# .get() returns the value (or None), not a boolean; the `in` test below is
# the idiomatic membership check.
contains_eleven = numbers.get('eleven')
print(contains_eleven)
contains_eleven = "eleven" in numbers
print(contains_eleven)
###Output False ###Markdown 5. Set a new key and value pair of 'eleven': 11 ###Code
numbers['eleven'] = 11
print(numbers)
###Output {'one': 1, 'three': 3, 'seven': 7, 'two': 2, 'four': 4, 'eight': 8, 'ten': 10, 'five': 5, 'six': 6, 'nine': 9, 'eleven': 11} ###Markdown 6. Create and sort a list of keys in *numbers* ###Code
sorted_keys = sorted(numbers.keys())
print(sorted_keys)
###Output ['eight', 'eleven', 'five', 'four', 'nine', 'one', 'seven', 'six', 'ten', 'three', 'two'] ###Markdown 7. Get the first element in the sorted list of keys ###Code
print(sorted_keys[0])
###Output eight ###Markdown 8. Find the highest value in the list of keys ###Code
# Keys sort lexicographically, so the last element is the "highest" key.
print(sorted_keys[-1])
###Output two ###Markdown 9. Create and sort a list of values in *numbers* ###Code
sorted_values = sorted(numbers.values())
print(sorted_values)
###Output [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] ###Markdown 10. Get the first element in the sorted list of values. ###Code
print(sorted_values[0])
###Output 1 ###Markdown 11. Access the value of 'nine' ###Code
numbers['nine']
###Output _____no_output_____
CXRlabeler/withoutLM-mimic-cxr.ipynb
###Markdown Classification Model MIMIC-CXR Dataset ###Code from fastai.basics import * from fastai.text.all import * import warnings warnings.filterwarnings('ignore') # Read in the train and test sets. path = Path('/home/jupyter/data/mimic-cxr') df_lm = pd.read_csv(path/"lm.csv") df_cl = pd.read_csv(path/"labels.csv") df_train = pd.read_csv(path/"train.csv") df_test = pd.read_csv(path/"test.csv") ###Output _____no_output_____ ###Markdown Multi-Label Classifier ###Code # fix result def seed_everything(seed): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True SEED = 42 seed_everything(SEED) ###Output _____no_output_____ ###Markdown 1. Data Block ###Code labels = ["Atelectasis", "Cardiomegaly", "Consolidation", "Edema", "Enlarged Cardiomediastinum", "Fracture", "Lung Lesion", "Lung Opacity", "No Finding", "Pleural Effusion", "Pleural Other", "Pneumonia", "Pneumothorax", "Support Devices"] bs_cl = (TextBlock.from_df('reports'), MultiCategoryBlock(encoded=True, vocab=labels)) db_cl = DataBlock(blocks=bs_cl, get_x=ColReader('text'), get_y=ColReader(labels), splitter=ColSplitter('is_valid')) db_cl.summary(df_cl.iloc[:100]) ###Output Setting-up type transforms pipelines Collecting items from dicom_id \ 0 02aa804e-bde0afdd-112c0b34-7bc16630-4e384014 1 2a2277a9-b0ded155-c0de8eb9-c124d10e-82c5caab 2 68b5c4b1-227d0485-9cc38c3f-7b84ab51-4b472714 3 096052b7-d256dc40-453a102b-fa7d01c6-1b22c6b4 4 8959e402-2175d68d-edba5a6c-baab51c3-9359f700 .. ... 95 325f2526-1ea870c1-06d8ff34-1b02764d-9e336cbc 96 38a433f3-1d000dff-a774352f-35c0d838-353e023f 97 4a25692b-e596ad27-5bc2eba3-e518093c-623f4d6a 98 0d24804d-197942ca-7f32a773-b93ba943-40022beb 99 a664e3c4-97f37598-e008ddb5-674d8b24-8a49114f reports \ 0 No acute cardiopulmonary process. 1 No acute cardiopulmonary abnormality. 2 No acute intrathoracic process. 
3 Focal consolidation at the left lung base, possibly representing aspiration or\n pneumonia.\n \n Central vascular engorgement. 4 No evidence of acute cardiopulmonary process. .. ... 95 Frontal and lateral views of the chest were obtained. Left basilar\n atelectasis is seen. There is left basilar and left mid lung\n atelectasis/scarring. Chain sutures are noted overlying the right\n upper-to-mid hemithorax. There is subtle focal patchy opacity projecting over\n the right lateral lower chest, which in the same location on the lateral view,\n appeared to be a linear opacity dating back to ___. Finding could\n represent atelectasis/scarring; however, on the current study, it appears more\n amorphous and a small focus of infection is not excluded. The cardiac and\n medi... 96 No pneumonia. 97 1. The left subclavian PICC line now has its tip in the distal SVC. Overall,\n cardiac and mediastinal contours are likely unchanged given differences in\n positioning. There is increased prominence of the pulmonary vasculature and\n indistinctness in the perihilar region consistent with interval appearance of\n mild interstitial and perihilar edema. No pleural effusions. No\n pneumothorax. Surgical chain sutures are again seen in the right upper lobe\n consistent with prior surgery. This is some fullness to the right suprahilar\n region which is unchanged and likely corresponds to ... 98 Fullness in the right lower paratracheal region of the mediastinum is\n comparable to the appearance in ___ when a chest CT scan showed no\n appreciable adenopathy in the mediastinum, instead a distended azygos vein. \n There was adenopathy in the adjacent right hilus, and the appearance of that\n structure is stable over these 3 examinations. Aside from small areas of\n linear scarring, lungs are clear. There is no edema or pneumonia and no\n appreciable pleural effusion. Heart size is normal. 99 As compared to the previous radiograph, the lung volumes have slightly\n decreased. 
There is minimal fluid overload in both the vascular and\n interstitial compartment. Normal size of the cardiac silhouette. Moderate\n tortuosity of the thoracic aorta. No pleural effusions. No pneumonia. Atelectasis Cardiomegaly Consolidation Edema \ 0 0 0 0 0 1 0 0 0 0 2 0 0 0 0 3 0 0 1 0 4 0 0 0 0 .. ... ... ... ... 95 1 0 0 0 96 0 0 0 0 97 0 0 1 1 98 0 0 0 0 99 0 0 0 0 Enlarged Cardiomediastinum Fracture Lung Lesion Lung Opacity \ 0 0 0 0 0 1 0 0 0 0 2 0 0 0 0 3 0 0 0 0 4 0 0 0 0 .. ... ... ... ... 95 0 0 0 1 96 0 0 0 0 97 0 0 0 0 98 1 0 0 0 99 0 0 0 0 No Finding Pleural Effusion Pleural Other Pneumonia Pneumothorax \ 0 1 0 0 0 0 1 1 0 0 0 0 2 1 0 0 0 0 3 0 0 0 0 0 4 1 0 0 0 0 .. ... ... ... ... ... 95 0 0 0 1 0 96 1 0 0 0 0 97 0 0 0 0 0 98 0 0 0 0 0 99 1 0 0 0 0 Support Devices is_valid 0 0 False 1 0 False 2 0 False 3 0 False 4 0 False .. ... ... 95 0 False 96 0 False 97 1 False 98 0 False 99 0 False [100 rows x 17 columns] Found 100 items 2 datasets of sizes 100,0 Setting up Pipeline: ColReader -- {'cols': 'text', 'pref': '', 'suff': '', 'label_delim': None} -> Tokenizer -> Numericalize ###Markdown 2. Data Loader ###Code dl_cl = db_cl.dataloaders(df_cl) dl_cl.show_batch() ###Output _____no_output_____ ###Markdown 3. Training ###Code text_classifier_learner?? loss_func = BCEWithLogitsLossFlat(thresh=0.8) metrics = [partial(accuracy_multi, thresh=0.8), F1ScoreMulti(average='macro'), PrecisionMulti (average='macro'), RecallMulti (average='macro'), RocAucMulti (average='macro')] learn_cl = text_classifier_learner(dl_cl, AWD_LSTM, metrics=metrics, loss_func=loss_func) learn_cl.to_fp16() learn_cl.fine_tune(10) ###Output _____no_output_____
1601_bounding_box_regression.ipynb
###Markdown ![logo_jupyter.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAZAAAABcCAYAAABA4uO3AAAAAXNSR0IArs4c6QAAAIRlWElmTU0AKgAAAAgABQESAAMAAAABAAEAAAEaAAUAAAABAAAASgEbAAUAAAABAAAAUgEoAAMAAAABAAIAAIdpAAQAAAABAAAAWgAAAAAAAABIAAAAAQAAAEgAAAABAAOgAQADAAAAAQABAACgAgAEAAAAAQAAAZCgAwAEAAAAAQAAAFwAAAAAD7LUsAAAAAlwSFlzAAALEwAACxMBAJqcGAAAAVlpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6dGlmZj0iaHR0cDovL25zLmFkb2JlLmNvbS90aWZmLzEuMC8iPgogICAgICAgICA8dGlmZjpPcmllbnRhdGlvbj4xPC90aWZmOk9yaWVudGF0aW9uPgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KTMInWQAAQABJREFUeAHsnQeAXVWZ+M+5r01LmSRTEyCRgJgoFjqiSRRUsOMSXCkpgLGgruta/uiaia6srq676qLSUlBxJSKKHRRCs0EAS6JCFhIymZoymf7evHvv//d99943b2beTGYmIQR4J5lX7jv1O9/52vnOd4wppiIEihAoQqAIgSIEihAoQqAIgSIEihAoQuBwQcAeroYOuh3ft2aNsWYBf1s30e/Fxiyi1ns2GbNgsW+2Gt+s5s9a/6DbKlZQhEARAkUIFCFwQAgcmQxEmMVG45h9/EmqNJ5Zat0DjkYyNPiOqdscM+YkY5o3wVQWu0WmMi7IFTMVIVCEQBECE4LAkcFAGhpgFIscs2gxGgXMosF6hUZRf21TmZew5U7cL816sQR8xokns5mB7kzGxMu7d6+c1V2QWQhD2mRgKqQx6i/UZvFZEQJFCBQhUIRAYQg8swzklltiC83C2JalCzP53au6pa0i1uO+yLfeibCTE411jjW+NwfmMMMYf4oxNsU7DMFaDFpZDFdZynfzt5/nTcY4243v/8U4zp/8pP1L27tqWvPrN9c+lDDHn+SbJVbKFVMRAkUIFCFQhMAkIPDMMBAYh9n3AsesOnkg6nPt2uYFvvHPNpY/Y082sUSdU1KuPMJ3sV550HoXXuGjnPhsc8ifJB0BfMSBnzgoMrE4n+NBuYG08dN9nWT+M/nudnxzR/Pcut/mGIf0o+oCa5YYGijunQQALb4WIVCEQBEC44PA4WUgd/tx046+EO5nVF/fWmPj7jsh8Esh4Kc5FZUxNA3jZ/qNP5CGQ4iGIG/8Y3ucIUV/hUYH/+Gxg/lLP8kX65hYLGaTpcbGEsbvR0nJZh6DV9zu2fi32pbV/ClXkWgl7z4pW9AElstU/FCEQBECRQgUIRBB4PAwkGhjO9Q4atbtfAmqwgfoxDud8ulTfNEs0r2iVaCRqGoh+xXBnoXyDGEGE00yNC0n+ymyAS8sKGETpdYmUsbrFWuXf6e19isty+t+qrVHeyVF05aCo/hShEARAkUIjAWBp5+BXOsnzCqrpqqam1pebFz/k0j5MI5pEHGsSy7cQ5JVj6vA62qsHh/Ub2gnll2VQENJ2FK2U0h+X9eDsJqr21bU/1AfiDbS/BPXNDQU3MzXPMWXIgSKEChC4HkOgaePgdwim9wkzFWVt+ydluzt+z
fj2yvROIzX0yEMIwMhT5Dj6euDdmCMF9l8d7CPpcrZOMFK1tf9a8/1Ptp++exHtFQe8xujluJPRQgUIVCEwPMSAk8P8c4jvDXrdrHHYb/C/ka1Mg7fw+PKwjig2IGJ6UgAvGhIji2dGvPTPQaT2n+0VTxwlVm61F3YsCW5pWGol9iR0OFiH4oQKEKgCIFnGgKHnIEsvAWCi1tu1TVtFU6Zez2mqncqUc4OiKsu7lHh4cBneuQj28eKxaa99eNOxUzrde75KyrUxU0r6x82ok3JSfdRzqeMrKr4pAiBIgSKEHjuQ+DQMRDZgL5uc1xcc2vXNZ/CNsOtTsWMo7yuvRBlMVNZ6PFkNsMP8ySIvxeb+bakIineYNbPXtmyfPY12ouGu+OmYUnx7MhhnpJic0UIFCFwZELg0DAQYR4SeoT9juobGy+18cQG/sQdNw3rSEKSD007hxOGukdjk04FezZde29oXTn7Cm1eXJGLXlqHcyaKbRUhUITAEQqBg/d6EhddSTCPmrVNn4LgbhBPXD+Tln2F1LOSech4fBgfh1K8rn1ZZ+qsy2vWNd2hJ9iFeYiXVjEVIVCEQBECz3MIHJxmIMxjtbAI69eubfqSnTrzI0jrLgxEjFYHz5yOnMlJY45LMbaHYgNmUdOq+l5lInkn6Y+crhZ7UoRAEQJFCBweCEyeyAuTWLARRjGEechBwOca85CZSHnde/vxJDs5mzT3LbzFT2oYFtkTOZgk5UWbkb/I7flg6iuWLUKgCIFnPwSEtgpNEHO5/B3BafIaSLgXUL2ueXVsSmWD16mb5XL2Y/J1HsGACruW5hxLyu/tuL9lef2r9JnE08Ld99B0HcQpxuQ6NKAs1lKEwLMRAmq9efbE5ZsUsY9cdTnjsRKCeqPXs18IqGgzk6rvWTbPaWfKTNFIbmldXnehmutkAJO4yKpmbeNriEg/3eHFt7axdUXt70IYPgvc1Z5ls1bsbhECRzoEQuZBYNkqz/FfTQAmz3FsumXH3l8ZOYt2BDKXiatHHBLcstRmqjc0nmFs8kYNRxJMzPOBeQiLTHpduzPOlFlL0b62tlm7xsjBSWM0XMsBcTQfCZzYDbEps+bZeNy4e5rvpuxruBDLsq80KYZ0wLaLGYoQKELgyIVAcGdRFubx8ti0mu8bool7vR2Z2fOm1e8yZo/ZuFGE9ENk7Tg0YJjYHoiYa4hrdfR3OiqtF/uBjWGx8nw5FyGmq+dHUpdkm/C69nhOSVlD7frGN2qsr4CJTAwGvrcfTQY34X3CmAgMJmmNfC6mIgSKEHi+QeCeYMCOawdw2JGQTz6R+/bF4hVHbEy+8TMQkZy3XqCmlXSmZwMnzGv9TF8G083EtZhnP2IIiSeS1gBapbOh/uamWcpEJrgRjrcBgR0BqdxxIvUVUxECRQg8fyGwKBg65mxrudfIODFIhB/3+rqOWJFy3Axk4catCQnlUbOh6XKnvPLNcEdMNlbOSjxfJzzmZzOyqT7TTZvrAyBs5E02wseXPN+vjlXWGKdypkBxRlBq9fMYpOODWzFXEQLPbQhEwS6gCpPYWz2csBmf9oCYvcXaTNW6J2uNZ/9TYluRxs18DueADmtbsh/S25G1ZdPeVrN214WtS2d/LzgfMsZ+SB5C2Jhdk93bUucYL+YYZ4v2vQFe0vDs8cI4rPAuNlaEQBECRxQExsdANimz8KxJfZ6T5lOx20tEXbSP53mS/RAJKjzQL5LCF+fcsvP2xqVH9Y3XW6J1Wf0NQyCYv8E+5IfilyIEihAoQuDIg8CBGUh43qNmbctpeAst87r3YbA/wpmHBkQUKZ7/g0lMS+M2Lw0WO8An38S5fjfDIcOjst0dHyd3g1mDuc9w38mBkhwkrJsS9KnyJC67skeUh8WBul/8vQiBIgSe3xAYm4GIRLwGPwCStV6DSZQgbWfkVr/Dab6iffH0Gk77hT9w57nVEPHRLArHyNJj3XxiEyp8zmPPk99CV1txux1eX1SFjL
dQe7nfpaAwiDBJtUY2uuT9w1Xr2r7ZvqK6RU+Wh3e/yw8F06GK7KvztCkWMKOTaGqzMcKQlrJJP4yLFuyHPJQ6JCDmvs3MbVhHc5dvVi92mXwd5Khlx/uDxk3b5Az2k4LHw+XbN/pm6QX09RC1E/VnxJjC9u5hjp/u0PxBjDjGuhl8eZrgOWLeI3gC06WCx4cYnuIkUsXCeSxcPLm5O1QHaaOJ431UXHmaxiZN5+PL8Sf5Ewqa2tDAulmUh9usQanj6cQ1gdEi1uzhmI+8qcn/ODYDkfDsDScPVN3UdBZKxxv8vk4x2kRUOb+ep+uzx/3ljoRWDzyVIqIPPYN3sIkt19EKcZMf9DCjLZuasPzm9bNP42bE1CbPkzZZGuMv4buU6e0WFiPlho9llPbIKdnFXOVxuqeH+9SHJse42QG0kKmme5/c9f7JgBAfwGe7wY8vXLBVmfEWs8CVgJRDq+UbWsrCBdWF8wgC1TEGq1cGRztvg1XIgrgOZrcKpjoaMRlah7Q/tA8NPBEX5WaeT4boRvVXQtCC8QlTG5m0r7TzbtpBWhmZYQJPhNDtY2EFcBk5JqlKQkUIgxzOxOmHOoyQZctWXhoWiLOI4Mr4UhR6IojYPHIcDVQj8ByEx/jqzc+l44PRjzbvUd6DbUfqiebv3eDQaBry0Dzjh1XUz+g9wNd4HmxGwi/IS6iPCcJw/OuoML5EfRz+HlkRgrh4hfvLHjLrMGaa1xyaa7IjRj4ajkXtBXM2+fkYPtYC38dmIJVPKEBYzh+yZWVCsEWCPzx7H3hA22TK8TL9f3cG0nfj71qC4qP94Se5iDaNu9vRuLudC/HO2FRpEi0D5rD/19D6HzJlj/jxRGs8nR3wHVvu9ffNsem+U+n/W21J2ckCCz/dK+MRGMAZrGsTyRjmqC0mm74XD6nSqD3NK23iSEDdleQ9n2cQbgk7rMyLR4jpwrSMuXza13d8af+qY/bp4huN6MpisTYb7JxLMVLwbHDCR8sjeeXQUR4C1d/UdDTewFWe58T9mO0zmZ5WDjm2kjPQugTp8hmULla5v0WZj8J11rXb62wyXgO3KnGsMwDr2d18Ud1TYR6jcXmCNqUHYychKiId5fVx7ronS9LJVE02E690fC/lxD083v2uXqestcNa7jmmr6t4hbHyOnEtQcYoKRinMsKam1qqMSZW24RfZgZQTuPJPW3pnYzp5AAukeQojCSEP3MyzPzIXB2IiUifG2B+IXwkWsPu7oo5jpOo5DgxI82mOTjUvmfZrF05eMq5KnNB1F/t+pgvQhg2gm+58fl29vW7ZmfiZpbjx1NgoGdjfjewbdu1bM6ewXYELngITiTkjsCl7k167os+eTIvszc0zswOeHXc3VkuDuhOLNbZlylp3rfKikSleZQxv/skYTaDeDzmoPhR2lq0WhiiCEE6LwK/fd21NVk/O4NlV8JpgdHHdqDL3kZbR9LH0ESv/eel+ubWGn8gWx/znUzL8rohy5OfB5MwDrNYcDQQ3JjLus6z5iC7znStl5T1k826+026s6Xd2m4KBsxF2rtnDeUagu+DNR74074XOAiUMv/a5pwv7yztn+YcHY8706Swzfhd6dLUrr3WypmyYD4Gx3fg+ieRQyT3wkkIAMSvat3O4xwT+ysSf0yim5NZpeHChQ7lU19Oe3Pqe+/XWlfUfbBQzbUbmk81TuL3Jo6C0te9Fa7ynuYV9fcVypv/rHpd6znWZL+EO/KJXu9+IRyMzbi2bIqYou5tW1G/KD//8M8165u22kTJiwhZD5EKiVaQSbSQhNvV8b62lXXfyEPOoVWEhKp6XdPbmIAFkARkbnt/y8q6e3JMJ8yDd9dbMcUtgDnGbNze13LpYB6I4zzjuqvgXa9jtc4FhaZof7gQi89iU/s749rQurL+Ou1AhExKNFezcGV+n6y1fvLdaG3nAYdj6YsgY0IXs296qa8JHvnjeMr/j6Z31e9W4h4tGq20wEses5q5YefsmG
cvBH/OZrkeT+5Z9K0MehwHxYX94oHgwzzsk3y+m8X3rd3L5jymtU4kzphoFCFT4E6alzqOcym1L4H0H01dFfyJ4AOYbA9vHOw1D1jP+07LZXPu5rORAJkSYUEYjs26y+mvT8oOJHtu3HvxcZ0Rc5G8I1Je27Xrdp4Hnb+YPKcAwxreGauoy2IW9bsxu+6iE/fZuLOh9dLa32tdeeVH1B09EIIVaks161pOZy2uAF6v5Od66qtAOY4zOj6aNGOEoPtP8X63idlv085ftJq8OqJqC77nzV/19U/UOInSZeDfm7EYz6fe6ZRJ8SdtMXfoetb+jXH9KG1S3+pYUdmhsFpD70YTniiUS3ltyXkqL22XIiC+gSXxQoZTzRjKySsCRTg2I8xqB7T/Pg9caVt21J+0rrx69Hv0Eq4jaMW5zOcrBEaIfZvbVtb/Mlqfs7/dOAf7wXtp4o20d5QtnTLD9HY90rKy/hU8Gyk85M1X3fqmV2MdXw4MzqBumKsp5136iyZtcKgBPsb8jb+f+tnY/7ZdUSNCnSlo4g7Xp4Q4sqnyX/tZptL3dqd6B45/6n0IpGGqvWHnqV4s9j46dhaPaulzadBP5sP3ZT62YGD5oVueuKl9aXW3WDEi3InqOFTvMtDCSUwjcDE48cV2yoxY4Hl1mLQP7RHgkUN2aB7ydf5XH09t29vomgWL/fkt2+LbPngc5/yzZXYansX7mn8Gk3kTgBP0QF3cLH2PbJCYJvn8GLZosUkuNi6S+Z08eSmhSP7HKZvyfr+3U0xaMb+/y3fiyVfLjYotK+oeVKKSM+lsNHMaz0g2/jNeVp7/a5sohYH0C0MN2uIDyMIFhpytdMxyvn1D2pLHQxMIKYtLZ9y/MlZV/1r5Pdvecg1v96jULigehS2w9v2xGTXnCEq6bc3CCGAy1qte3/xpk3WvkuCOhkNHMBFqpDsKMi8J8s1k0Z8JIp5Zu3bXe5wB+7amJfYpVf1V62gwxNy5EoBdjdlvinWQC6I9owDuce6Gn0o4halOquwEt6fjCpjZBa0rLXF5IP6jMRH5TSQk3quPabkaJe1KFmSpzGVwOIp2oiTsQwis75UBt3rjxF8Z6+v8FAz6v1qX1X2EQblmPEwkXNASQ8i3/n8Bj4tMCrqT6VO5BzMoa4x2oUq8w2TtCZgiT2C+L6tZ13wX3/8J5vFn6ZbNDMy1lTVfUNyjfKIn80Med+r+UA4XJCdJiJPM01Ju4WRR+/H4f5tk2RkyFfSd8cpWmXzRxA1rXiUsqZK5erHJZt7LOG9z3eyVuy87uilw/w61oqhE9B4SAJhbuXG962wy+S5gJe0btGZy5dqQD6W+ly3lkGstcDgVLfvj3NPz7VRq4INPXaRa8ehzJ+1FQgYf6d//A5/+nymZMoW1BnownghHgkbLaKsMC8Bs2nptqrdzNXj5r6yvb1DcP+Dc5RH96nWN/+qm7UdsacU0UewFVxQnpU9B0rGB06XgSi39OM1m+v6FeHxrE+XelY1L0brz6osK5ebN8y+LVde9Q9aH297yPX7/pWiLEs+P88DfYB0lgVVgZXGRKR0rzHFkCuAzMOvGXS+MOfarfrzkdYrX5JRL9PLmQgTuCvpawftRCJzn4PL/b9Cc/2hbUfdZ1SIL9Xd4iyzQ/oQrk6xzU7Oj+T9943wwVo6cJzitiUHpmmXuswOl4HY9eHGO073/09U3Nr237bL62wrCJix9MG+jMBAWhhAZFoi3rulCJ4Mg+kxdSQsAZYDKPBqWCEH2t4kaR/JMbIbd1/zn1hX1bzQrQilSzEyRuiiZCiWxnzI+JvJKJrTVKZ36GfZS5B4T15aUJ/E0ExPVg4aL0M3ShTkm0HjtQ0iRQMJ3fsZeypV8jGZQHgvuxDFj+bJw625sOrnZ2odGcn/pug5JwLvP69gbFvVFYxD2gGbM3wXBqX8I0T63Y7cip++JpM7CXtd0G/eTvE0kFA+kh1igIZpHqHY3xCkJyI4i26nsHVXB+A
lDP+Plbs++++fcsPNljZdbbbB6bdPX7ZTK9xopL/s6fZ1ttPUH8BDpjpp8rwrh60SbKnkRh0Y9G08iefp3It2f2XaZ/W0+oZE+aQoWRDYg5E2/cqZWneh17mbFsiBhrBChx0D+v1NPCy30AYmE55gqzCEnmFjsxTamoHadaTUfhrC/rLXBP1sXWqgNR80MeQ/nsmZd4+l0+ifOlBkzCTOT8fu7kzYFb+rv2k3EhEcosx2A9/EOs7THMc5XwDjLnJTzGsb3KER2FZraDRwB9rzOPXRE56g35iVz8z+kXZFM9RbOpS4awQdMPP5VVdJZKzBt2u3eTRWbqYV2DeZWfyos5TjqOIl5KQEePu2/Pdbb9drq9bvOa1s++4FB5p7XUgjT6ev2TTdu3wNcbraA/g3YVCzBeax+k+55EEXu/5j3ToEnMkQt87fQxBNoe7TueQPOtFkX9+9vX1x3Q9NZzZfbHSNxMmxPGPESOyBtldi+H9uKmWcBS+YPy63MYab3L8AQbcZpp2ZEDr8OM/KJNp6ab1zorXVmxioqv87cLUKge6eazEYTAPS5dXF9L830xH4Rm1b1am9/u+Ay3QYf+7ufoFdI7n4zeNmDTBXHpDyTAb2Qhl7Gfib9chlbzcqB/W2vqLql7VXtSzEVjdIe/e3wOhDi1Yhi0EIxVzHn4Ms3WSO+1y3nuSri4HkCodK4e5uh0MNSTkNoQaP2bobZOVg+oIzwiP6e/WhOmwE5/bYgAZqIMWj09hXM99Qw4GwF4/xM7bqmc9ze+HlBf7Fg5JuW85uk04w/0Tbtod2iCdonm37J/L9UmDmWmTR4/CB4/HfakvGUkX229e1LMdHX6z5tPF4TK6n4Qe36XVe2LLXXFFyz+e1N4nNhBnKt2MbNQNXa5lc6ydQLR0jak2joEBVRkEJhEbXBBd9sT/r+BVo3hERMELo46hZb07zVmkULPIjxSC8iYY6CaFurrEgDEOTZSCCrYBxZuQed9DYWylVyeNKIuSeyV4ptdxV6u9P/21S/3w6yVcHxhdgMaiHGZp2SioSX7RAm9JCRvgxJ4RB4xkKPG5HqhGIP9OfVMVgAaSwOMUDSLDG2v7tLNA9nes3b/G4sBZm+35Lz061P1d8lWslgKWNYUBWx7q6PIn1/2uvZ10cE4aMGunZ/nTzvrF6769OxqTPeK8gPI9xN6w1pk/lOx/J5yqDy68F9G83OrIfgzRSCjGn3Bn5fGNj6IaLR3oAQeVkI8u40/9IpnynMo48ypSyu2zEXfa7l2DkPR3sE+W3IZ0wBJ3kD3hdg4K/19rV2O5U1S6pt01fbjIFRbxJGPWR8Wj6UButuajqZ3ZQHbLLEYWH1cLCz3O/p3IHTx2dSyfLbnrpoOpRjaKq5vmUe5sr3AN8P0ccU0uP1aFhdtPI73QtzIWQukh87YQUTxqFAgm36hDOl8t+97j1Za1OYkbx97D99OpF1b268/KhAOsirAO12rpfueS+1fhShBcEgPhUGex/jP6V5ud08ZJGr+SUYd8r2fQ+TLsxjdy/jLAOm1yGkf67p0vqn8qoPPgKXqieaT7ex7FXEaztXykDY52CuvR28fhn4hiCUN3dSKmBUA5XX/t+0lOn9vS2rPB7mkYb4pUx/9y/Z8FjTtrxe8G1ogulUm/olwHGNLZlyOmX6nWlVF6K9JFuX15+vTETHkb8nQtsh0Rzojd0em6rMo5e5Z1y9d8GYGiq6ev6gVoahrem32vXNC/1032fJ/3avo63bmV79MrO/bS0/Lg3DLcmkDS604EtcNEKboHY30z3rxvZ69ouUedhUBfuX3R7r4Ucw+Qey/d0teMXs1MYi/A6ZK9r8ClNesRarhVzZLTVn/HR3g5/tX9t2xQsC81RQUF9lT8X0da3gSwNrMebtb+typlW/ypq2n/JskcJhBHykKN0XbQ9xpq73rFd6cf/bMMujKC+RTr7pxvz/3r1sdmDqlexhEi0VofIdAODLPJqJcOTa0qn/A7
Pc1rbE/nJMTTeqZALvhRmIuh2KiOG/SYiGf/hdd8ceghBLgI64/bBm1M3FjWxMQbwKmVZCyWFIpbKhKIuG1LrjuvfVzn33EqSa4/2BviwTfULdhuZXNRtzr6lbTZ6GgHiJiQzG07F0XgdSxH3A5nw/2yESqtYT1O87Im1DlN/A96tUkxtNghZJP/wflC34itAek8BqMiFvZaG+iMUjDOcr3NP+T7kSMpatm7jkazF1bjRq+zRmdc36FgdJ91NIrZjp7FvZd3kPyPUxlSizmUfdeOK83ZdUMVSS9DMg1uzMiJAJbJbW/gT7MXZ9+3sQc8Apm7qABbyUDcZbTChoSD41vYHuNcc0f9ZOmfVyiFYPxKecxfUp+vk5zSMv0sYCwBP19QIAwA5Q8/L6zfx6NnC9FcnufJgIBNm+H+ZwTfOl9X8doYKLAIAJYsa390z1BjI/EuYBQ+1BEChHEPhu60DzsmhPpECbXqu1T9LexyHctzCuW01s6jEwwxvQiD6M2a4PLbIUvwqFgXR7SBKtB0kdTesfbPnUf2esafqcggD9NlZi39L0rjrULtLIsXot1m7nl49X3bDrf5nWn7GtWIsgYr2B/h9VXdN2QvsSpOhIaBFHCQSS6vVN73DKpr2OdvqAKUS2+5NoS1dLE5p3wepBeMqGMnBBlr+fX89TgaO0Yg3404MQcSLfPwxT/rK51qiQqHUEBEw1rWSy7Ie2fNrxwDANTFO09U9o+F/RfPIS4Zl8Xr1YcGqA+u7g2x0IYp+DqF+FJtGHpPx2tPt/VXONeHNGzhxSLmyb/B9yKmae7XW292DmLIeh/jdtfViyaMqHnzwQnMTdu2W53cK382FS14Lb74ao9rN2L0CTe2XbcvvACFzRygIGhhYvLCEbiw18wSmrxJGmD+bR9b8EMPxE8+X1OzTr8JfQRFp1U9tZ4IUwDxxuUjjcZJ5wHf/cHCHP76+swwuMFzqyfH7WhvYfxLLpO+nn0cL0YtOrXs34r2K8V2NyHwofaR+h0ogQY80MiM8Pbdn0Snd/K0QgvrR1eY2Y4CEdMGLRgqO1xNy3Xip7fOYmtM17PJN9kHqqMJdKdd/AJH9CIGTnCcVa0eRfRjIQAYLayKnUOkt8GYTIf4WX0uRbPtiSwX4HUCatQUJtWKrmpVkbGo+3noO7gp9A5W1pyex6VBa7LubVjELKRQkpKLrbxKy7/FNI+reYbD/mgbI4ROkfyHavnlOI8st71QXaJpX8gg/nUyMSN6/UHGazKplY58SaG1pe3Ho5G5iLFIIjJeio3qhk9H3Yu7ol+94ABPVkp3wqIZ733wDiKfPI9X+4Ghyadmj58yD85UjVtWhLwMRco8S2t/PJgURq0d6LZ3aKd9T25XPTwEb6OKSf+tuyuj+wWL/tlJRfjFSOUKRa3y2mmT0lSYIzYk/+xrZqvr1fzB5KEHo7v6vMQ4i9eJDIJvcwTUnL86LtrJjXH3fdKwb6u5cAzUqYgYSzfgc//5ueP4gyy/vWYB6SA+kv2Ckz69EAugnwWQFs1tHmSsmi+2YfmC+rZ8iY5Dfp8/xTTWLbeXYzi20R1xI8iOpfhR38ahYmMzqIJpo/egklddls913/BpFEwRcIbc8faPdMyXagducesz25fcXsR6jjTOD5R99zyzEzzvbN3s9T/Er1fhKhpR2sIiE0LPOBO4ymFNPOwzh5BMwjJGwMRrINTfJb80kuBPUzECoxo7wV2OBAhQ7ts08E4VcCJOvhOpgJBL523a6Ps9+5mPnrgyGW+n09V4hZTwmynP9gjiPNQRtrCJsUXMNlFKb8SdqSub+KKw9EAPhM3fVN32++Ik8AENjStngQ0YGPSJ+AnzCPO3PMI8LdUXBF4cseaGtZ3ftqeppfD506RhxpTKb3H+nRA6O50MteBYxRYPoGIDtHYIqw8InWFbO/oCMRQfMxxjl4jirQqKGH0maXm92gWoHjSBy8ZtPfc8bu985vG7Z+QqCEbwgDc6Zelm
pcVvVYzfXbF4NWfzQxZwoCnWT4KJ5t16rHnDCDNflFlSjQQVth2e/Cu3QAefPc1hU1v9Px1z4SuVUr48+VpJ7665pLm2CGCB7vdVLl30cI6GctzWvvab6QfN9Sj7dIKM4VnNwHYQ3D0iZ9xqI6BgR7MW6twjx0NMMyPvNfBfnXbIqJh0HVusaXIV3fE/Ocv8aSpT/HDn27H4v9oSZZ/zj27cvzCNeQsWxZykUtpJYds2/FpPF3S2EALpz/TYIYumiEQEYpXNSeiW9CqhvAdg9WBgs9yqLclo0SP2XzNJPBXyf0id4KxoM8vqjg2Da3t6br3qd1QCSi/o+oU4QAFgQeOD1UcSduzmTBuKymC/idZ94TMA+/ZDuEm0VdkGJunztXGTOIf6u0IfsuEKET1QQo2p4gfqS1lJTiPVKG7Ri1nj0lpLrPar/2XRAwD/1S+EX7AOEIzD7+bUiy9BbhxfdP1RJy2DBKQsRpu3p967Gg5mWYrTi/U1rBfDzKJV/KPGTsagIZZVyCDzCPtBAykTwhditoC0AjsWGL0s+F0F6kPRLMYzUMaxr58Hnr6846STFZBsxDHDzGaFfGKrZ/5uZJirwHgutA2AgVZK+AyMxTRivEDKFAcdDYE9Gu8CPDjGnNz6QdERxyGpY8GJ6EWdeFziTG+byWd7P9DHE+2jXeRSQ95wVugyviws1UfgpGLHszYna8XpiHtoMkretgeBvR90DgVBSFCXySuf89+yJx3QuKswkvSRiQpE0qTJmBmclXwhCPEhd8BBuw0l+jv0fMQ78UfpF5FSKqzMyamwVXJJwQ9v+TtYSYmkckwVNJ4qFoFkBQp5q+zm/iLPMFNXtLu8IgZSwCu0ggC2HYNXXKP8PkXwA97GdsYlq6tBXmIfgz1vpBQ/TE8Ubn+4q5T6rJtGSKIJAQ9ekDnhOY4GUuFgynI9Jd3JqxAgG9j7eumPO7uev8EsXr0Vyywbum5uvUDt+2/brbTH/XY2hNJepA65uzBQKHMg0SxqjWRYv1EzY3NmPKxcNCpLeR+aL8z+S7LDJhHjc2nYUa8AieCa+WvqKWekiSLn33UTWPwUZ6ffW6XQ26qBvuHknUtR4caa35gRKu7ICoxPN6/dSiYHgBU9XPYiIgcdr8cWb1YfLJ13wJlzZxHPPMlrZLav4oP7Ixnv+7Phr3i9I0yc05FVkoYgAAyXUBhW6r8rBQWtgeHFL0fPsnwUCSRx1oSOnf45lxB8TPbl8ubp9jpLDviKh/wz4tEpEAoLo6u2iWlhIvpBBngO8SiI9xyqYngeWfVPKUTHKgaRxpYSX7VpJ8C9z4KBKisfX6TE+q66dBYuR772IRsomkRMFYz/9nyaFET4jBOJJ61TH/mOR+Cs7cDjGnlC82yJFJBAnwrX7trqOAwnIYlhBbOK9dI+c7VDIU5jGOpDHTqA+CezMmlIfAGQepP+nHEpdI8fl/3qZ4msmk2KE1qGICC5LPxUIkPeQoH8ZKIX7MKt/zMOX3OJV1JbGqOodpf01Q7KTI9IgAn3wvce6kLYtWtTdRXv4JybNla5uYqRTnx2qKPF6oyYiI8mndYifoKgb7t876VntdIIjdLZqOJuu5r7YVM3DonpbEVLOj7Zj63+sPcmB1HGlban6wpjz/jyINocUJytTo3Et/VbApUJHYCkTm6+lozXqdH9McYm4KmODQAlIHMFTvN9/7AGVEuyrBu+9WtM1fyX6C4s/QUgW/NS6dIy62tmXl7HV+z/6/mFiyRIRzhIbztADaYo7J5mrAqSdZkmA+nkBo/B95vH3H8DNKucyDH2BaupfGO9N5n9IjhDHW5NGa6WDo0WAr+mlUxsAUvEI4LZgjE1poOQ2r6jB/DW3g9dc2lSHnf1ekHSZYOS+9lXHJArTYKQewyXqocqvZPD5T/aFFvS+UrH9XuInOuaUkALdLg2yLB3OLWi3SiiTf/1XkwpfLAILCtAxugPfoM2lrFFU8V2asD0BeBGMmIiEIjP
r7E8m+rXb+uAik5GVDsF08g6USNCxeXdnAM3ri+oDEYY1kNZlMZj9vYl+Vaiqcfg+PGJKYkkKEZH1kvX0tO+nnTpr7uf4u2sIB29CcZsu+voBQOerPzkO+EsEz+DWPiC0JiAxE/I1I/wRJKE0hYT8QnOnw7ZYL5PT4BJKaLcjv+F8VezGNYjMIujJYy8YcsQXwF8K4EAuV2LZn/JIbJN9E5kTrzWkImBXEBTQrVgn7Rq3rg5jeSE7MFRdzJaoQG4HHPHleX1eZI8byffQEPNCyqfd9uLv/P1zBrwJuv9P8lewbCqMFRxntRZjHIJBThejf0Lh02l4lxuHZk9Hrz/slFBTattf9CiHub7qGkqVTHTd7juYSh5JQg5fvXkfrLr+34ykI288CBtPgcBvncMDnNZD38fggH33FFRFeovNlk/vN1JECohYT/AF+jiP7ePLkxt2XndC1sAFNLtI28qrXj4FpTyjgmxEU6uBSGcEPN2b+O8gKAx53ov0gRh4d9zdwXoySMlT/9KprtlQIjYiEhlyVQkvQOsl1qzK4CdCSSHhk3f9NOIcwWdJ0rftg6FGuc8GHkQwkNBVg2kehEtwNJmpYuWf+q5hFSG7CXB4rr5zDZi0LziL6D09C7DnEJR4Yxvm4/ircPl9CCZE6a1J/hRB1sWpTQWwr/414a0wRc8mQ/GETnIu/Q00DEt5FBXN+YJXLBjX5fxVkmwiShRXnv0HzVfqMxyFq5olZJXse059HQ/r8suFnzzgQIF2XDpvvdBA3XNKWfQv0YYEig49Wr9Y8sdIS6tCVKkwsTrdEVMd9iv6FCNm2o/YDrTvq5mJGmiumDP39gP1EylPJHs+h7NEBs/A5bCgeKEIUAjlTq9IXycsIxIuG7y9Whi/2bxuY2BY2EMhynAwrV2ko9bamW+6F8G1DACiwB4KlQU4Qkxxr36JwLBGB3f+pHp4LTU65OsfzQeKVkTzfuxezD2K0TtMCNR8zRoFLY+McrqUz4vGHq3WXwOQSOZzZtKq+V84pqVcNdGb05qhHJF+cHpiTz2OW+nc9bCv4jxeilKtL1p/OmF+AxufKVQ3scX1Pnm+pwotxIkngHpgXpdx9ui/BWQXMNq/WavLCt7TsqGsAV47GU2tey/L6wCQrkvOYcxfiCm1UtbfrWif7TG3ngGSKsoJPvp9Q70PHU0FsywJCCI2Wmjcp7kNl3iqoj6deEu39r+2X1t+vRQppLaPVJc81LI7Md/Jat2PPK/1M5gxccd80zU2qwLMttX8ovDkII1oKCL8pqHbitISye8JxUwVn6nT9BLUditeRUkxoWwPrjlVCGLCvQ9HWoatDkJ+j5FqhNW8f7OdoTeCwp/sa/jmyONUXfoGEkQgkO457KKLsOXpma/WTzU1OAtfl/p40UkdNor/rdeS7Ndpo1BZE0lrFcdxY+g99A5ybiCePYVMtmHziO1B2v405v9G841TJNe9oL8LIRRv0erbpnocS3AIbw6OVzz23cRhemk35bfpIFvSEkogTsA6IO25DUO1hKZJsGoY8FwaBdxgzJvsHixYHPwrTFtu6EIwGhb/XbgweSOhupvkdYYj8IRWFX0Rw8OIm/UIO7ZXDQITo4aYWwHtLXajFFCo52jPptzAAnC38dU0PoPLPZ3N1KGwktlZDw0D19YS6MO7LDC6cgbnL3iXV1j/WnGxquBstZfForYx4Pv/PJsZEuPCrRjejYWcIJZOqcDPpE8i8I9hsn9dv1+76DeatF/riopwqr4z3996Lx9GleNQ8kKtUCLfsMwhcBZ9X8xcRY4Ux5qMo8rMIUJic0DD0EhoY2CLRmjH5xmAiT1TY9NbdUp9oZgKXCaQ57Y2JRsMJbE96AQiDcxcv0SpEmNC1K3Me4m/DsMoFtw+MKwwTXJHkm39UbyW6Kl/GShzzQtCwKdZqe0my4m+aN1z7I8oFa4wDsWgoxjlFBEUcEdAS99+reUUbOIAJeUSdIhQwfrxHkQRMQB/yM0
lstigJszN+HIEGhcd/Qh+HWleUZTzvBGDKiI+PwAaiGcBWC7ImpT8HmYYiRzi5InVbM1AXTIxIfEdYWiPYYj0Oxs2AdS9ksQtiBBJGwa6yGOAyMAQ8S7pOI8uOyItHs8tia+AT6ryzrqkN6feFuvioEgPCO/nlVpOTRvgmC5KFtX2J7cdVcRMntZepq7P8lCqP4zHxYOuls9tAFvE4GUqIyDOpJMe2fNsUlN2kRHRC9QhCytFe3+/IenG1o5utayaBQLJQQUffHQ0vgpWs4VIuYGNwlCCRYedlE9KdGqv2HO9YzAuncW7sUoMbNZIX8c3KkiJ+DUmi8ZD8WPwFIpXDaCSQZafvxrZrvlCq188TeBGVfwv5QaLNTPsyBYysh6A5Mze5XRoYiMXd4/1EcgpEiGOJnB0w6TulGdEI5H0iCeahQpCEiMF7qZX5qRNp2hnIHCv1xGvmBvPjuV/GeWIFWnQ5+1Cclyh9gXXd+3F5/gEduC4+YO5rWsrhtSHJd2AQCd3DEDNUAVOUBvCkDCFKXmbRmoG3HLz8zfblOFUEaXTpfEhbg18awzHhQrKFsz/wEBmiP0fcrcVpI4rCEJYIcEXgLOYiWWOi7Y+SZI+pt6KsGhFjHg7WJ1P4EhjfyyDuWRxM4kFboxSWxyL8AF9w66nc2aBo7Q8vFnpOVs2ZejQFj8IKIOtYaPFDmlWiWkwmCe2Q8aqLNhXcswnNhPmRNEL4ABMJh5IBv/X3PPOffh/Hi7QWZeNDIMRFDw7Be3xIHUqYjZ+0memIKNMDyT5cQUMyPsNfFmxUIoLvRj29mxV66wwnNUM7yTFRcYdzrXmR/rBo6M+q2iEVAeRO4UUkNuM5UGr9s8VlE4+ZtkL+5cSv+7nve8vENQnEwFMKkFrzK619k5K/Q8NAFJd82YcgLdbXib0wMvgHp1h7d2dnBQRCzFMNDROrJsyNFkKFBZM/eFipQe348r0qWTPXerF57FfNBaXnASz+zDFs7YkpimCHZSWyDyCSnhBnAF9wPhdyQFQIPZJtncoMCnhvf3JaJlhko0mUUmaMFO2/0OwT4hVEYvGy/MKULecgNAmKOtcRL5wBGZo/4JjUhyD+XUx9HAaQyx8WG/tNRiliupAl3xDED3MFThkE8hSYmG2lNEcYk9bLj/4LbVxkU6nvAKMy8FJOOicRiM53XPd81/bsIiTHfXCCuzE/PegkY4/JeQDgFMBftAghPvnmRCFiYXRdBnaUagsEm2SVHMte4b8wlnJGC+6OsqUw+sgQmgyef/5Lmccwl52e6k+L0T+Yo8GyKojRDyGgCnRZhzVHPzUXIQ78iM9lnudRn+IKYvts8lWjjZcRckiEB/EKBHaFcWWwmbxPYhrF6VKfqJADXAolCX1Ewlo7G5gTGggLg2x6x+z/afZJEPNcMwFeBUwj97DAB7ADwXcgmY2HgNxYINOBHhVcRgcqNO7fhzKQkDC7XmwaQTkkpopUpIAcd42HJSP2aJITi1cSi4aN8n5BgnH0k50d16/RwvkvUlJqaNBKQi8aZs/NEmBx2nTT2yGeEusJiyazoUCJ7OFuInlvrKezG3deDOKuR0gQOuLcRT4s16Mgp/448RfIWYB0iyZeNugJNEPcAiPT1TggNrKlANR4vYwsLURJw3vgCsmqrtnQ+hYObr0dYnIaQDmGGE6lRmI3yXoXjUjwS0Jk8C5CgChI4tWGlsiPQrxHNrGlLti3gVZPU8cApR1+unHrtoAArWakDSN7fcAnofmAIeyx4j4stL3A9NGrWYEiBx5w0QAbsp8IxiNFDtjK0AwBKPWZhp7wvT6+lFJvEEYjkk4xJ7UutTcTFaAFxfsG3EnnyR4M3jmCixKkajbag2jK73Rkn8T1noLhPMJs3+Fk/J9oDDRpRRjJYvotBGyN9hZYSYgQCdvBLA1kspjQzsDhhMCA8A4BwUSTzCtzIu7emHKZVDQLrL20gv2HFNIYlcLXwJ1CbzlOSr+e5t5hbDNtJ+
YRJLVczWoi9IzAFXE24HkCD0hl9pJhnEnGZDXAIX1ZDYgKTLJWFew3wMxnQGdoJwOdYUyOE2jvIPrTnhT81lVHikk3JvLrJOZxnO3J5OalgDAzrRUiSSOJ5aF4XrZn+mPoiw8JKyXqqvRGDXyjoUJ+dyFxw8aspTmMGORiwKJLCNKy14w8hswB2/kHnqzXOzEgjEwIC68B/Vztmc2cM/kd3hJng2AOnjRPVHT3/Elrm6Q0rGULvQQ9K/TLgZ8NwaGnAfnFZhyY69zatY3/4Nnmf7PJ8hdqs+piyTwp46CrwWaxEgZlAkQUBMdb2cv4G4ThOvIdy8HHqzXfKCMD9HIoUtcGeiOrZLGslMknceUkuXjaxHTNUXshuqReYfwmRJjfvX7RUiffbFRSvG0wGZfaUohiz74ZwfPFwZtoDhD/1iX2LjlN3N7dvJKN6Utp+FQEnODMihBswKidcWJHY6o5GnfVt7q280vEprrdM+7nOeX+qFYocxWm+TO2xbpseRJNWmCJAwjMU/YLx7OYokqGvwskA2EgoVov6qrNdDGwMDF5+glzFYfdXgckvwADfJn2PYcrIVDlu9Snc8EHT327271M32OO37ueSShBa/v6YKDwcaABdseoK4pAg19GfEJQSYUTLMwmS1z+QBuIYtWNKPH8ejCUmIaEGeQEaKLqHaEMJFzsAeEIJ0yX/4EmT5Gyp2CuUHIlh/rNia4BB3F0892YJfVr9xzVtNLuDExd4eoK7JhZuNcvMaecLUTAc3vu04M+sgmpt8IVbG1yD8XAdkQmCALmP+lazfrmr9nyyisdiBAnmbNs/sZ1I9Xtb4RuEEzRewoNrdW3bjtUpsUhvLnj+81uPNYohx6lDkKlXIpUDUEdXcsX901Z0UJYgD+3MeqhuXFQD2mhQApxP07QfOyQ1Fu4bdp0lekhJlNLF3sHl3nW3w/3xP4W4kWB6g/4KNuPahDz/PamlB/zntT8q3nUEJYUSR0msmUJ8dmM+ab8aUTYvo6zkGlO4fuL+TsOuFXLXoacoUFDEa4gpq4LnXTvheyZfLZlRf2nda4kajLkOFXLJRI9FbBgFJNUeczL9H7fy2S+gmBWCfYHt3uSccJJdCMxzXlZQGZtfzr9uNaxdWuAxDBg4kqtMaVTiSpNsMZ03wCRAASGmDF7W3h7zCHsCx1rZSm2421BFBivCQ7XnPXLdu6+rEo2oiUg4usPhCva7iRfOA8cIYL0LOa62MElbQzM6JOs9jlTbCgDiYYVY+pDASB6dCS+4+nUGdqrRaIKEHOUjvKjUBte/KYRWdasgQCiUWgldrpKY5oJJuAR8bOisoyInW/m0dcLhSWBGP5KNlSRBoGbf6cWVXMXkuOhTDm58VBWerB1bSY+0kkEUjSuhKom0OOVnAXBRckpQYvAi6T7uxj112YG+h/ct+rYcA+ncJtyWlcO2MFoUggxBTPJYcMt/MJuA/fmMKPq5eNPqax8QRkRE/fnTDMFS4/xMPRQQiiZYVKyF6PC04gCmDQ6ubApfG69iq7u21VgGJHzIB+IlB7YygcrEiYiz+XUMqet8eb5Oz/K342SSZxK2No/we/pOpOi5/BoCaflE4TiJwwGmvX0mn+FicyFiVzKbzoI8eqD6SvjlnDkTsZsb7ksdFOVSg9tohurRVNF82j+Fzut+tPe/la5pC2B6ZLowhzkNO51Cdf/baFAlPldkRP6cgKc9YzAJ6ubqp+GxGVgnU6gBflodXjupwPzolwEdjBJBExJKrjcQxy/gP4cTJXPRNmCDITFG0pZz0SXxtFmaBqKZdymbNzvQVXnngQ9MR9MysgqZDVqkEPWkdCfwPshyrdggWLf/J/5qa7Wliq1zSsHBTHhOWIa4E0OFX49OjSnRcMDdO0r5jxKGJXHUf1fGHfwf5c0CZc7LTfWS0S3xspzmH+bU1kTl7sYJJw6GthH3X0tA5g/S5CA92OnP7915Zy7hnRJ7raAWC+sDMLc6lmU5k1693
rjdeEej0S8G0WCic6uIOzvhDFJ1WLTnxHvyVbzWRiUsNkJQ0r6I4iB4fJYCRmjio0Q66gfjfxIAouadX8ATYTc0/ZPKRWnjEfFm6yxfE5WNvk14zhfpM2FC7gFqoXo87Xz/XTLNrtt73wZUyD5irlJ3FolCd4HZsLw5sbgumM9rwFzCYnub8gpf1/SiMM9+z/MfHxA+szBvT4Y/CXg6qNtK+yXcyfnuZVeTdaYr9CujpOmlJnvYzwSHWCrPBl/kn2qhZVcGcpYolLbJCbZGsaB2UouqYPGfJH+yHjYFPMHvGzmwrbldbdF+fVdiOy+zc4QXJF1tdi426/bHAhnhN/XvIoKQ0of5BcEIxIo0CZ7H/QcNodhZiB9NI8fGHlqXLOP/yXfoSEqdU/04dnzPpSBhKYhgNYXhgYQBFAbwRE1pNX0qcGYXVfMaWSz8G/47Z/ku9xzPrrLCFeHxOUsSIfp7wvc8PJt5uGBqp6mZi7hMXWhV1eA/NycJwePwKCzxGSw+zKkPjVPQUDEbBP5g/vmdzAep2mFhNeG8IDkRxTMDnFnIi8sb19M4YTH8nvFRdJmerEuYbuwmXcQu+cuJVIvgSDKAdULLpBzH0oYAy6e16kGDUESeFjFhAeMkkIHAIe7RTC3kAkalCyPO5k+IeSPLzSBO+4opUd9HB2aI+zLKbnW8zSAkrheHkZz3pO40gbtci6AEPtz+fJoY3krZ4OOGhgxrlFbHPxhzDKhaTCXW5ha1C/cP3NlxaNIo/JC6haRG+Gm9Qr7JJ8+yA2cP2Y77yd8TvndXFjHM+blGiXqH5Sa/W24CL82OHtjjpUnGmqF91z98nACaUQ5aefawGyGCfMKOYlN/K9+zGbE3eq9tG1ZzW1yMHLLPq5QEVdscTFfqmZRd0Rd0o9rQ45BhJ8JdGv8WUN38JjvcoOkv5eCM+T8E9A/kc/f1Qvqxl/biJx6PbCFJqV7rT+tokejZ8u8PcvSUAYSSvYs/y72NUU6gO+OYkt4JgcqCygi3MZ8H1PJSb6YkPHLGNEttXPgbllaEfN79t3eQgC0gAHkSamLFmsxjlG9KJYqw02yT5hRSEcUPwfEFGB6Ot7O889DpGIgtTKISHJlVf4aWKldduEtWxNbbBCkUSt+Dr80Vd6flsCKTo85XU4xc9gqiffNHTCPX0sojC1LxxcbSkEUSrrESapUV4ZAthwKPTl8SMpOKfmb05NtZpOWuzNRPD1vMY9vn9xBQqR8JHhhdkzgq5WQ5uZfWsOlVpggKVFutsEjG2GSc8T2DpK8isc/5J4o+XnSSdrOTokrspWX93pRkEy9o7s3M8WkUibRm+1osna3CigoxkMaUxNIw5BHMi9ze05JbF9Rcyex4K4mYkMD3l4DwOyYjoryhazvh8MCm1GtxGmGjRNzQtUNO+e3X37UtpygNLTW8X2jbbnBUxhvf7bEaeyckyYchwoPMJBX6cG8BBE++7oeIa7U/7J2xDSA19bJQ8c1WmuhpsdxpOm2BDKWcxkercAEn4d4xjXT7TghPA6NOU08y9gXPUtrksOYE03C5Jmn8Ard34G0pTZOZMWe7GVU9a3wEKbCaKJVP1P5c8JW0IE1+hbGPerWzSkR8Y7EVPlEQF5izteIxvoUG4W4sXACdqj5AuHBJ3pmooQbx+CJ7md1KFtFCBu2APkBu/qrNfBYsIOqi1nzI/OFey3ny/fgNDjIIJ+3LtAJ9+KJX+HeKZubSG1bJo5cUvDZmIhcUNv9qhkgySzZ9Fbzj7H3y1CisxXjHBbaB4H7SEguL1WsKyS7iPCABigSGxP0GyRYNmDRCIierG6qcjo4z8toXG0H1zeb7ikVr2UDGhfZjJiQhhYVDYp2RTIHT34ftEuwQOOfB44FN3hOtF0x55FqNrS8qWtKeXOfSW3p81M793ZWnh41btPe1bFU6eO0+bib9G/W5w2Cv+NIzM32HXMDguTE7xAGD3RjguM4C+Tc2eGD93P4FRiyZVwxLe
7EnHOldhGUxtHK0CwCC1Jtz1m3DFTGdjCupwZmxP83dLLwg+jCvlwtADfG6cSa32oF16H7R5rV0BoLflsYPsVHEu8tWYqHmEypkBrEvMPcdr+6l3MqnNGdISa4wBEhmL+CHSz0cBH7P6SBrHOahO+Hvs5AaycOmffnQtmfDc+GMpCG1ToLyJDsR5o9YhdlUgvJgc/82CTkihyywnMH6+5bWQAdXGJTiioejomhOPhxlE0jBrb4BLj/qFKV+MJjhx0yAKRP/W7tm9Teid/ukN+RjvAMEVicUrWh7eX6W4gMwcLAnZdLmYg3tEWJSRgOZkgdz+EvrpXbFJHiSaKwYi7p0OFO5LRuA/ZuTDKyEUzZcwN4D50GrVNewtDgTEvgj+xmudBpynE1T7a+Q35mT0EJs3w+cIIkSFBBEgv5I3rXuPgPD2cgkiH01AKzfqC/S7slU07gvIs4WBhcYkdqwPLDaCm3ce+tAHcrYV4vAHhZjKMPKR5JOd9/UqMwBx5pr9IYYGLWQsIfrdqhzzfpV7xf06EQFDCfKJIAmrxe2CVMMZGS0Bnkt1dIoVzYHK1hHC9i2oXwiqmXSAHnYzauYs+lmoO2oaaDYTNDnDGYGONSLohsFuCK3MEx3kQ7WxrQ8GUty+2hEupemcghJlXhPqbn+LeJxrabK8QAAEAASURBVETKOuWVxCqKrZIv82fMGeccSG5SFLST8CtyjkcSez8Pyx6qfG5kz0nen01p2AoVqZwQ33g38L5LAhAqRThSRyQhAHBHlAlw40m5K3otvvRtYCbYydx6Xg/RPu/yBwbOaFkxZ6Oq5BGziMYkpjASKv45LOCXIKVBPEYQIPR7zGDllThaumLGGpYCuJFn0D49LMdz+asTK+1i44NQrjJ8/oxTJeONIoIecOwQw4ULgtDzA/HYapj+DOauT88QFCocOi/Mqqi5jf2pJ9AwicYLylrv38W0SYyoiLgUKj3k2ZwvN5bItavV6xovIRLta7gGF9EYU6iKUvlZL5Ab+EIzlnsbLrI7IZApDffje1dLTvXGCohafsGCn8VhQ2IpVW3Y9XKuiz2fiNFp6hPE/bqERZmzkX6RcPshwCPCi++nwb8Sx0m/U57P7blgfMyqbrEyDI4QzdVYTqKTSxwv12nVepIzAyLI+RvRTDhx3Uek4Zdwj/b75fc5UxtT8j6eNL9lmzJujoB8QiR2zkRl2SjnVGjiWi1/y0ZHQ75Yy81W8BABsm+q5beFlSeNT6tCy5vbs13HXvNk8yeYs3mYlog2HJGyOePp6vjyCK1gTct99QiWaJ1lKa6HFteND4kWIvMdaFQHrk7zEXxR5pu9lHcwpxlHrg6wNtAqhQ5FB3wPXN0RkyOC+mCHbgmkb4D0uGogMsVHclJfeT8uGkDrirrLXC8xHxL2Es/PvpzrWo/jtrHXtq6o/Z1eGjPC8wGCH9oyKfNJIYBQv1HGyw9yEtX3AwaijIjyuQQTmYAKniv2LP8w/6t+SrRA4Pd3OQcjxA6ovFWGFUqwo2sDEAONKAsBl7yYci4nTMUH1dQC1VHQ5AuVwqDlT50X/IBRWO9qtAB2pAZ6YTzzapJ1N2k55kf2FfRzoRfalt/lPofqDa0nwvSuY+8GN4yEhG0unNSswR0QmLEwa/wH18wKTnCN7rSFOHNcp4VoV8OgF65BGIEVYiKXWYmpzfHsBjH7QVBTMKV9STd7jRRtNHP0wBqXXd0LgfwrsCVCtEY5+MT0dU9O377C6qVUVDhab4OT52EwTwD3HvUei+Gh4HtPxfYHe9Pbd2xXqbd1R816bn/8KxePlbJPghem/UrtuqdOEfgoHEW7GC0hxMmYhaDKdcecZF8OI+53ps6MI1Bcy905LbpnGd4iCQz+oriCcwqL7Q0R0x8TboorBDZk7kXArd7Q9Hb2yT6r2kdoa5fKCFkfwGONrOQxYDPaWIY/D856iWD0WWHwpIwtxWnDxL8jX6QvKgyMPg8634FQDnr59sZgvp
0k1x60Z+PJ66Ues4pLsNS0rt+eNS8jGUhoHgBgwWlqpapH8HgCoh3eh353XA4YiSlJtBK96xvEU80jCliWN5S567hrAoSsWddyBXsoiyBc4q87CsGTyJg9xOdPvVjvFZF6QmabV+XkPo7CsiZX2SilDnEbUSiTrumtCi9W7XdCCVbutz6R8waf0Z6IuVCIj9j71XyI2SEiRsBetQXmiNP8/0assuuVFriyOc5mueyBEC4kNyKZa51vCIOE0oZAtC6ffaPbvfcX2JTLuKiHa22nv5Prd78lZXLnM7R92tX26Qe2L5l3JXhrmxcRmPBe2i2RcxCEMfklLfdoPyBBubajD+Ftd1wM9D9u15770ArKuZa1G/y5gnaVGCjjlPyF2qX/Qkzm3LJ/Rs3RTfejFbwEjaBX74ew/kfVFVfumwlPn0s1XHX2BdUe0Kh5r0qZ1A9F8w48pYCJmLMi+EZjFLwXIYdxyr3oMNlzYO490g7a4o16EZJIvbIu9J1WfJew6sykjFuiSpv43ZjnzlY4Sn+kzvx2dB6VoWdVAFjX8i4m6Hsa3DBZWsLVuDudtL9axiDnVvSdF0DwbSUrvtfLFc1zapP1X5XfFG4Ks+G4om3kHAu4evejGPG4/E2iB4ErGrKAqSJKQCaeCBgd41ZcOViivIRx0ye9cKyv89vOtKoSBI0ucPwUcHaTBJ5VYUD2VIfPt8LH6HwLcwQ/fmlKprwc8xXjxs/Z2o9rgEmBv5RfFEFI3keiXv6vR8rnUYglw/Hsw6o648YKxiEHgjxHShLJAiyUm+E453oCPu13ygRtWboEqU2QbVMoLS2GsUjfhyXKz12/PSVSXO0NO0/F9v1NPXHuMcbR5Tn5RSPE4guOPQNf+/zYWMOaGNdXVptgvS6mAxRgwDKy0Xs3VnmBRmhewmwwuTqkfumvaGnajQB1uJ9kQGwhLLB1SOHvhYCfgmTVi7r/r3yvRQtcvXupZZGPTASpnIep6s1E4F0FAV7AnevG3du0lup/ZGMlP1K7s7XziQF1WuvK2t9LUMsBJ9G/92LbGdYmnfGzidSFtmvPZmfKjPm4hsLApl5M2yczm1dVldb9VJlUfvMNhF+/qeloDmt/GHj8k/zkTJlp/P1ttxKqfQ37IH9SaV3nJ78gn4WBCWGAoHKK/h1u776HndJpc9iD62EMl9PumSz9T1aV1/2sULsaQTrmXDTQ07MaGM0E73qdypoyTD3Xoy3fqMxgqcQSI4krOLjKEewN1Pt2Z1r1WzF1dbGBvwjm81e7tvH/cT3Sz5qWFo4EXL+u6QRUiQYY+4W00weTLff3736k7am6f9P6I08icTyA8bQssZsgch8i31e8rn1irivH3HInbV/LFc7/2b7CPs6zEetJ9gUd38VsVbIUmjEAI06Ito6V6nw1WQlTszAzjFp4ITkty+f8lDp/xr7Pedyd3g0jeE/N2l0z0CI/QcyvJ7Vvw15krbvWvgELwHsgwq+IYf5x9zX9GMeV/0LOuIv9HYLblM6MZ9OvpuhPuWhuVryizH3qIiv7uWGKcDf6Pq53uXZAqXlrumllTYc5wZk262Ttd2n5olh/z2Mw6I/H0/73R0ZExgTI+aCB6c5bdvfYzzPfc4nz1su+kMz3jdzPsk6Z8qrw/FOuO7K2wr4WwsFcvgN/4PgPfEry8SJ08xCnkQwkDAAYi7l/yWb6d7OxNksjUR7ihg+qujUKXS7eTroxm11ft77pLVuW1m8W08D28o3cZ7w0J+0MaUcAGNwylhUpsGbtztMIW3GnnDDl7IfEYxoJjyEVEGJS7NEGE821/sciKViJytB8Y3yTOVR8ZD7ZMBX3QyHuoVvw8IJ0OasB48jMnbvBuO4Znqvw9+jaU1iop+Ei1NWRs8aTTZborhzYpPe4JwSERD2tRALGDBXz/Xe4Pfv/AAGq5eQ+Pv5lV8TSvRdBlO5nyH9lkF2Q3zKGIkHZjyfo3wkQXYLm4Y3Tvddke7u+2LZi9seUWE
ggWRmvaITW/xEazaPEqjwl7qX/ke7foVKzhdBCnPYusZ21a5vP5IzDLyBKr0DylQCDJ3Cvyw929zRvJ97SQ+wl7IChEOrXn0a9L4J5nMHmtBxA1QCOLOif4E76D7Xrmk8R76Bw05QZorfDk0jjSI3i4snlTqfb3s6f0+5L2IMjGkl8AZFib5N2IZIPYuraAQ3A/9NORQQ4Ds5wKuFdZsjZIolWC+Mq4+T+DbT9bm1m6wWD7SmzCu6tmVW+b+nu/eYOmMgir7OdC9Li87kjfWMQibf5UbBjO1i0H7iK/jCLuhZyQuJUmyyTO2AM/Sul3J8w1b1ehaqQCeaGJtoKzyDgXwUGWdxWr9E9mX4YQkn5Kifdcxnj+QN1/4UWcCUWp0VTR2N4zHkvC72UuO1vWgJTWB9RIs5lc/4hvb2Te1Zy7chZFdPAcayBi1Nm929jU2e9UPZ/wJWlxLd6O6fiBVdow+7H8aUE2NUi8xwHJ30RcJvqJAk5jynP3dt8HTBbJTeSukk0RmPLkeyFuX0LXPk93O+UbLpXhINvS9s0yjoS+VKy5MKTyE8HTuJIFMBrwNz0x8Vep/8jZ2r1a71OwGBtLdc5bHD97qvRSP7AUn6SCEgQCa+ciucSz+BkxnaUmr/ZONf53t+2DuZx+WDDrApJ4bpm2rHA01eN+8V9HqVTBnFisNDYn7YGP8NgXYkcrfVZDYMzdrkJ/jqSYIr/M1Rrl7V7mIjNnL58PR04SD44wV6NM3tZurcvXZqohKLdW31j49nbV8z5ba6oTHjoNWMWLB68vEh8zVdJvKVd72eYX4N5EGUzMx7mIVXLDWEei2VetW1b0iaELGBIgwsk14HRPgziAohS6VTODLC7vQVDPmlRWC6KtUOeWCX0QLSI1uap+muUJ8w62tvCBQuCg2DWKXUqZ0AU9TQym5aiQI0zSZgXWXYu0ZEcU+VMoS9sWHr725JSg56DEa8zCDlRX3dy//UrTPeeb7Mf8ZqAOJeUIYS8jjKvC1qEwgUikVQLMeg2bl/nvb4T/2T7irr7ZaEiye0UqTc2q26V14kAGUvUsAhf7yRpd3dLqF2G5y6E8AXSM6KPfzLM4gu08BGnDPMGHkWUI/x6ci6NChGRBS8vLE54EyEqmPsmL5P+TNvK2brRKwQrl4+Na2J0paVYJIXqZ3kRqR0msmfZUbvo88ur9zddTdX/5KSmxoRYIFXPRRKfK1m1PvkgvEgIA9yLfRPj9XY2c0nUVRDC9fKzSqPDNWaBLTAJtZnFmE+/CPz+hXNNEjNKmB/hxpOz8TccHJuOkeaCSLVSc5/bued/2nbUXwXzUMcTNKiRQpbEbqOtlqX267Ubmh+CIH+FcZwuI4DJx008dSYE6UypMEoawFHaQd1gnhPs0/zEce2VzSvqdiiTH35rn5rC7o7vf98x+yqv/b/TbNfudZiDdF/RSZQkqGMJ4smSqP7BzXFwBeHN62x7CJPjp1svO+rnos2IhgOd+rIzs/Zfva4Oh7J4s5W/IVbO3VjNLcE+GpUB+emxykr94La3TI/q1weDMzT4ePinsN+tl74UZmXOZq/nXyj7SbTP6UKcgdNsaImOIzffZNQ5kDhjYm7r3d+GhnwVN3beqNWrSTDPQhKua8LlpOKV1eDmQAwmVetmuydu/WHtS4LGsPbRrgdY+/tbWfuHNo1kILSo5iDMNfDqO2JO7PUA/4hMmZIk/fe7mJxqwoj8hlhM1yAWfR3JcKvakIf1WtTabMq8nsNAHyKI2ymmvwvrqWgeVuoZlnu0rxx95Qo5O9AnVPiOiYcsQdpYTVsNtGjtOq+t+VFZnzy5U1sUDyNZPj6ntjU56709TX/0PW4iiNnBPMGPY75uCbUaxJnNtrXlP9G/hFG2QgzDukPJZ6xa1LW7wZhe0+2X+cQuaitTSpVNPyHFcmdehJBjl999iZqrXsu+0puh0itp82Sb6WdnzUmpV5XHVUC+j8cW+8Q+ZkDfubltZf1d2gUxdY
QeVlx3+r4a07wduJxHKL5ZbDB3QDh/65r0I5o3/6ZHbftuNZPA1D9W9a226/yejnczz+eivR0D4amAwLGuadrziAtl25Gc/0TdP/B6499tf391t2wUB3smbpV6H6q06naVujWRuYzsw5KeOaFdiDHtfpzbLr8OM1yG1vFGxJRj4RfT5EIJdTHVeEq+MCMxqfzZS/fd6vfFvyNtK+MgzodqBsOa0K9CvEQg4h1G99H6G3fdAKG+HHPOucTWOIaN5HI8uQK6JfCV2TJoCdb8nT780nWS39uzbNYurSuoZyTz0B/BB2KaqXa1rO4PPDqj9sad51L3xXw+nVsYuY3RlGHiU1giIQseCUHdxW/3+ia2oW1l3W/4zklx7PoCn0JJvSfvjoex0c6vWd/yWhbiFeDV6TCtGtZjiTAkEUBI1O83MWe/4/n3uAL3p1qljENMSw0NRgJEch9KO+XP57dq9ii63Q6z2cadTZqXF6DzXdbaDkEBSj6gz/Xg9DjWQFSJ9DvUtjHZfgl6st61+y8BJm+jlhN80z0d709uBIPeR/Ptmw76/RfGdWs20fOdvRcf1xnMN5UOFxZC3OeG7Meze1q+iJIv7uTdttSX+USIiWiCfhvzJbf2Xedh09bM2recDDeg6eLxr/0xWwh+DJBueEbZKANYakMVGiErgFUB8AvnH17+0HzPYApJckgQVbV+lW7eyQRKCjl3/c1Ns9y0+TvIhtsncTLLp8fY0ATf/D/y8lfEIpBKAivJ/Qr+CyAaJ9qy6dMFMf2MbpgLZ58od3fxFIrhGdOc6htY+BSSVLS4tW/jfUH9ATlksQ+m4c+Gf5echZ4N1jDy00Tzj6xh9CeF6g4WdrCBSUn2Lcqtl55rvOQszCwcmjK9WApbWp/a+pRu4GrtwOJudCxhBJKG1ztcUot+l/coCSwl3yLmM6qH34K9Do+Lx+LlZPG8rLs3nXafEgk4KioazPw/b4sJA0Hz+Xf2UD5hJDz6QPpBTA2njuhPrmD4oUC7sl/jZdzZOD1xb4kXx9sqw50Ye9Lp3sYhQSWV0MoG8zBcGN6GfC/QTv74EAtkhfZy89Ee05tqVuYU1RMx5+FEK/p9+Lvkz4OjmKKqU21Ho7nVsC9Siouu6OPdjhdrab6s7qkcLksfJXaXML0DJcEVIeJhn8QEnTGpucxqNRdjJQkrg83Hba0un7NjyH5SPnOKcCFqKzztHX3VuZMvB1pruQLj+CBtSkDLPAapgSwTZo7jckcReI6pkE2Z7N60qdjZsaKyI1er9F0cCob3J8owfDzR88m8H8q6RmkfybtAUgnBJ66T/RuB1+5nc/PVmBoKnY8oUPjwP9IYNS43OUiYBsJsY7N9KerkS5mkoDPC+2DmYtLAU0ekcPmh8NgP1H2WKAQBhDSJQAM6UIFRfleCRzA8U60MbItc/RreEJcrEeZBqHIk4J5eQTo8Ty7zKB+0DtxlzdZ4cBnT5sD8Mkr2MR8L8kchO+Re+EKLICIcSrA2mTBEO0JIgST1yUVOYlZZkreRqPWySK9lkcpGrxIY+c6c5bc7vP0GiJGhLiFicroc80nTpRKbzMjf0CTEi0B94WJ2tzWocZw89lQ1M0lcr4H037TQJjUgBsxtaC3Bt4AABp6A0i59bLUWaU8kvgJJpNh9L4jaLiylFygWEloxMTuYTscen5TXdi5w9B6bfGZQqO7hz6L8aJVqVmUfg8H8H9nkb2i6nK8ylxI/Kpr/oTkKf4vyisC6aLHZvgSGYYzAPIB7WAopMHBJvof3BhhTHuHO4aC2T3QKMflJUtwK7n7X78z3wq3ByfqCa00zjfMlwDsiHjSAZ6tjAt/Gy+1eSsvfyKTzxXUDAeMIAmGOzBU8kbqVQUX06SDWq9TFWjgka3+U/g5KcMMzyAQwUWwMXY7r3/UcoBFEh4ActjRuDQRmMQM7JIijFzkERIRZGNZTFBAWHaLBsOdjf5VSUpNASibW2D7s+9ytvv+7qNPvCjUjQd
rh7Y1d7/Pl10hqjk7hyoljYRpyKG84AxgNJqNIUqLd4JVFjLMOW1/5gr4hUqrWxXyJq7UwiojxyeliMRXkS+JCJGFimCSOZkNWiFcKbRb5et8HxF13THPMaH0W4l11gc2dPpZ8hdoerfy4nhcYXwRfMVXkj3Fc9Y2RSeZgzSZCjU5hDYT7T9KWMA0xiY53LsdoQgmyhHsXF4soNW+aDK5I6cO7HoVJbALXcnhODw75fEdAOXLeR5fChVuy2Zy26e+nevZ9DiJdHRBpiPARmxRnQL4ghs2Ibg6i5YifRn+AGiOajAqofCypQH3PcCLdfEnLIDmRDi+yasPPkpdIOh/e3YbhD8b4Ppw4hQzFut6GhJs+xyRLPbye7qGGtwVMnk9aBglMbPqjeLhFLcqtfNvk3HTSWxYrn1GKF1GWP46l2F9rnncjybIWJpQiSXhChSaaeXzjm2itBfMHczC6Flaw0AQfBgEhQxt9XtmGvM8H+jgcVw6U/1D9HoR8Gtn3Q1X/EVrP6NK4TARmiI4V87Df2XXiRUCSxfj8SnKHuO8RJVS4jx1gg+4JorVe2LSy/mHd+4hU/ecXVJ7Z0QYmJTnR8pQzvXYqe2DTcWx4Kxv3LxHGIWd8xttBPRXO3odESEU5/ThnWLjOilsUffOrIEYUrRyp8eDGO8hiviIEniYIjM5ApMF7NmmzkM5vil8/C1VcN58fXFbOaJSp1+ytsZSpj3n+SZ6bXdC6o/Y4PDBuUTt7ZMNVKBVfDhsEcmeV/Gu9jhb8E3GlCILpfU36IGd8NPyG2NbV7JhnEhG2I2Y1MdE2bElGebMDzg9x+RXffZzm2ejy7L/reGSztJiKEChCoCAExl4cspnOQmtZUbedk6I3OKVTr2QvRLSQsRlPwaaeZQ+RZPViJLbdm95VLwen5C9I6qUWegxFz4rvhw8CwrjZt2i6xP69dm3Tf9nKmo9wEFDCfCziINqPM+nei7dF1+c2DO+WbCyqyVEFoepvtb60K+veTNkFOIp0O9OrK/zOtm9wrevdoXfd+De5hzdV/F6EwHMcAmKXGTvJ5hAqvFxsY9Pu42ghU5D4ZPE93Uxk6Ca6eM1EEn9oA5dzHWx6/n1wE/0Q7s+ggcjdCG5P5/q2FXUr9JT7jrmZghuTYX/GBmTx10MKgTyYc+HP7cQoejNnVDKE00gSW6oVneN6IvD8zHFj22ZW7N0fxacSzWTftJJZCS/+Urau3snm1cUc+LMc+EpzyjvldrT9nHMp52lf89o4pH0vVlaEwHMEAgdmAmL/xQe87V01rTgkfVZO0JKe3s20fOAGLreEryaEs7iG8rdw41b1BhtIxsfWoPLrOcjP2+fOxT00OjWKGUSIi2giEvjumdq4O8gxPauLC8zFFEUiCvNb3P2tX8cExSEuPRHNyfWKT1k/9huYyF/39Fb+mfMdj3LY7I9dUyv+EvdiW00i+VOTKruEuePcFlNYUp7yO9u/kWMe4kVVnNdnNYoUO//0Q2B8BFh87/FCaVkx+4uYsi4i9MBLvf5uTvQ+nW69yIaycU04CQGD2KojcESHCqrL2/fu7uHcjiRZ7CM8d6MSk30X7sXlQpLkRrpcoq1Ad1NGWv/jprKmirrMkINXubzFD08bBIShC6HH44kYWu8nJtr3ODX6cdo7B600wYFA4Q8zeZkZyiGCT/xHgQ696qRvxIq6G/+Qz7UunxN4XYV1Pm39LlZchMBzBALjYyBCnMNzIeyhr+KE7u+g7WxQqi35wGawyQCLiGRBADBnHt41p7NTOj1HzKEKjusN7Ok2EAeTVIIwdiTdyfQAGiOExp/Cn+WCn2TjLX5G4mvNWTA/ke5JTUv42Xqu5LzIayf0/ZvtBvKpuW9SjRULTQ4C4i4r2uBG4zQvtfdSyb2zrms8Pm47z2G//DT4xTw2swiCZAgLLzKARRDx5WTwDk4MbzZO4ldty2r+xPcg/HpwpuH5522oACi+FCEwMQhMjPiLuYbTvdzetz
o2ZVYDUU+JDGfEM+vpShz9404aoqMGi39YMyJNyhWcT2eYFctmue9zwpQb94SjSJwbgp3RkzJnatVMt7P9723L616kGpCYVHJmrmF9LX59+iEgQk7u5Hpec8xLcM3t1uAO++FzJAxoWGiKvNLFj0UIFCEwCgQmxkDEBVKT9eVyFKds+uvwm8fEZCGoeRaeURqb5GOpWD1mRin/9B5sRNUKb4wLmue7Rt8k1AVB+jq5J+GFeuOaembhtVZMzzQExE3XmjpCR8id7Is5uzR8L0MYhpwleWwz96FL+AsJUoeWXUxFCBQhMCEITJCBUHfoDUUo5mnJZMkWQifPhpA+3ZrIhAZ1SDPDL+CbYsuKqhWC5HBwLe5m+s9tX1H/i+Ayq4UCg2I6IiEAw4imTzG+yCyOyGkqdupZBwH1YplQr9UHf0tSIopCR18P85AbyBDHD6Nn1oQ6fJCZdbscF2IiQvMn8IpxHaVc0vNBZR4cRotcRMfdkh5kwxwowdiiJBqMeJk9l5OON9JiJzpQyknMKjGjyp8IMuNOMAzRQlQTOUjmIdqL/E00yca8mNhyWjwVyBhk3uV9rLmXvTUtO9FGR8kv7Ul90Z/0YTJjGqX6IY9lXBOaqyGlx/8lWlP58B1/6UObU+ar4HwOw2GBO6rvoW2c2iJY6FphnnVuaVvmW/AwSvJc8hzE3E++8xBO07AwU7O28TW4QQbeK9y1QN9kkURdfC69MyibdaZUJggxfzVhvj+pCyM6mzLekcrk5tvgo8kbbmYZb30F8wmBG04oCz2LCo/1m+YRPJnEpB6w3qgDY78LjBRTh41phLfUIWpv7N4U+PUA7Q4PMS5EQzwGx5zzsM7h+JJr/QBt5vIN/zBGuSFtjZFveJXB97FxROdw2PxpuULtFHpWqNEw34i688vnf87VMXZfc9nkQ8HyQ3IM+TIEhnm/jLbOCz4v1GahZ3n1Rx+FWR+IJim8pEDefIyAYVTh2O+Tl3hhHgtv8ZG+7V3cWf1mm4j/GG+XBLetwUR4nwy9Gbuvz+SvYiN3uY4S5rH7K8o8BFFyFzONs2shctWsazydMwonZxPxW3fb4L7w6hubXsf2ytGzKupu4irarN5rISE7JJKshK8WxJA72KMb3kRyqGRvKB9ZNLz0myScuGuue2gwFLpIHpJ3K0i4iL5K/C6RkGQPQPYJpJ0qfsuP6yV9ldDkElFUfsf7bHCUi+VjEO1V6yYiq6RcX5FyJFx55UbP7KMe3MDnf21bsjdVPsUtdfo0xLu0f88mo7dFRuXy+3TPGupv0NsxI0LL1QJcGOS/GW7SwZW6327mGmOVniRK7KLFgau1tCd3TAu7W8N5oUULPO0/X4MxkI8NECPRaleTS26UjMbItVdBpF7DpWpbE1u2buTsD30QSVGi0BJFeM6c+VN7S8qyey+a0SWeXzpmga22y9WtwtT2aQh1osjyXBYCZ6mqbmo7y8lmTzBx57s6fiabdfMa62Rnub7TCXDntq2o/6aWlyi+uI3Pb3k8vu2DNl29ofEMsO/01oGm/zGrCHIahLVnjoFh8xpO5dNHwQcZh8BUktzCmY8bwVOaYf9y7a6z6dSxjBZXRz/uOfaxtuX1d6hgo3gGzuncPUQo+DCcfqH5Eo2wLh+HqDEfh2izZv2uf4RX7m6zXIaWT6SkHYG34rjMGbCT32U+IvyVIJbCZK+lnWiOALrCN/yuMLI2XXdT04t8139Lvym7tmOF7VDatBX4LwAmEW7L+li02gnaZIzNpcBjQd75rgBI+jq8LxqqnvoE9jk4L6Y49Quc89anXIsMfI9r3fHN/w3wh/GF8dSAx1u4m/rNtMEN1fZH3Hf/M21P1pGkEesRmEib0Xzonh2eh4XmW+5WoS9cbHaMFzdXALmjcWt/gLNN18749p6pyUz/pdx2eB9zwX1JXBLB4XAnk/0HLsK4o91y3/1ozE8yj5Imz0CoUMJnB/b/2p9wBeZ5MOufcZdyAjff59KeiL
h0WtE83K493Nld/7GAaMnTPA4+CoCHPJbLjgRxjF8Sm137NdPUfCrfL+Ve7dmx0spfuj377gOmN4RlAqIsASwFOQNiMOheGjGSaNKjdwMxWaU1BOWDRTs0HIfkHbbQwzaDN5WY9dBk1IchP+e+SD1RP4KHEmLEGdZXjWTbe3PTFDfDzQnZ7OvJetew9oMxDu+T1BXuXnDb5Pr4jNpl7t4mXG7tLFNe+U/cg76cBbiB+vKdFwIYQRpIY+9LNWieobDRR4LbYdkA9rn6M+uafpMYyPyQuf8EWaWtaE4Cd+LgvpbgWYMxetMhQa1xO395/Oi6r2Z3NrVS5scKJ9t8G9fjbXWM/yOecTWAf10IO74asy2sW5gHcHj/rPgxN3DNYBfzG90pMTg/Q+dBy0O80HbBB00QooY1/DUIa/1MfFbdGe6e5laEvhI8KqfhFHNHa7ruTbQv8Bgck5QV5jF0bpgvGGWha3FFYpcWorXh25t58Bue3Gk2bhT8j8pGbfAobC8oMzi2AI+5vybvTnXJHeXnQwQjbgl+EcGHPpwynXAY0zEitL/gkmr+ETxyYzRD4UTpQUY32BceaxLYD0+D8AnGZL2lNl7ClbeLbiGrMDHFRnB4Xayydjn3uW8FJnFnRt27uS4juDAv/46TqP6oH4NtBvUXWtNhGa44OI2LuH6HIM8NhPbB2NQZl3CA9hK3p+sCUz71awSCvZesIkYaIotcFqur/5zbvOvlWjzsp34e58tBMRBpQ+3/cMOWZfbn1Rtaz+T+9F9w0HAqV5CmjWOTIFOwlMfZoSMrG+c+YomkuBH7XXs+xmG1LwbMQxfi4OIdb6dlEYJsLUvsJlyh18cqZiwnRMq7+9zYCnEPjrnmEqmKe70v4mreVdIA+0zfaF1qv1uzoeVNxvXO5NT1J2Vx1q7b1YBk90jrSvsjlUZYZFXXbKmIlVWuYdf/NazjPyBgfgoC2167dteVzMVK8G4/xOoLzSvsL5Dm/xnqvJfxvBiOtoRZ+jJS6HciYqFako2dy32OuxCWVyEu3YzUfx+ezF+GnT4Wt+5Hdy2ze5C2LoBe/DOrTlwLvgEebODZXM/6X4S9Ip/5P05UeGsGeu1qvPbixFK7GiRHCjMvMZ73OQRi3/rOf3IP9/e5Z/r9jLmLxXU8fe9sWW7/Q+DBAnhzrLZ2mdfStKZ1xewGeUYMrIc5Tb4e7eA71Ucdt5C7uq9mDNWcCbr9/7d3HnBWFecCnzPn7N7tsO2W3RXEoBiwpKiJMUYs8anR/DQq0RilaSxJJMZEJUQBTSyJLUWMoi4KmNgCGuszz/bUFBUrKBEVBG7ZXcr23XtPef9v7j24rLFAMDF5O7D33jNnyjfffPO1aS2Tmy6K3ZTZAY/jKWwnepUqTuXv6cC3b+Yc/suAyQ9KnJN1v1/KXpBTOYV3CXCcSTtWel7RGXaRVxp43kzHKf4l97Mv58pVtGg/QQPbLdsew7Enk6M3Ze5xHG8NjGsO4IyAzn/LJVKX0HcH68DanetmxV783IpJDacIvNls9y1qbeaX9Khon3+IjmrZRZfVVLEU/mrqT1N/vTC4vPbqX0KaWobOfBSWK4ELhqPubZta39lw09rtPOqhT8bS/08wIzcj0xvLxopTs2Cgz4P348Af+16CGZnJTX/OM/rCJUuYRHBLIap+hMdbWNI7EIGVkJ7qxBI3xFKpyUi366HNa8D/ePD5tB/0n9+6v5WO3pT8L7j/jxEPsmz/F5kJDbdGb06eSj+S1Ipy2ePRtOOq9GRrnpqN4BLLKx+4D8rK3/64DFIRa27C/m7djWvHcM3sz8g7imS3Asul9be3VNhd7s+oYz/inne1d2654/f0u0XnKF8/DV0fTT8+yfsqaKuDmk8j/58yK6+bhvXxltbW77PF2bbovNTpXAebBWc7w0wPoKyrWyZb8+X+GOUHl/KHQA6uBa5PaF8tSk1teHYTngpMWwR/Z2XFbJpxOPlf95V3js3h5BDr98Hzc8
BwGnTUYQfB9OT+1vLojWv2pi8upc7XSS9KdEYlxtMVpESpYLwd6cTik/xM8jxo+DLSYIEmf2LHEjPYALuQi4hraNMo2hQh/fFcgPcbLAVghB80p86noK+D0NcsW/8QOntL8mLUvEjxx5A+BlwzWiY1PkXbFrDdoC8SKWt4+4ThG2I3rv2qM7LhbrU6dTjX6kJratrw5reGyynrlHe0m0q93Dq56QUjRN9tsUr17xuko//xAPMSk5ENWX8CwN2D3s5luqo2AnGhlfH57xn6rZLKYjRmV/V2HCu78A3xz6aLN2l1W9Gwwo52J1AXMBGv+lTJ9TCaabjGfpc6uWEVhHaiU51YAPNgAFg5uy52q7g6IIrPczrwdK7OBsWGXV8A4QgzUqO3owSCLqv+nSqrgpnrJbqq5lsMuPulPB1r+BUMbQm5ckFp5QNyDSrm8zHWsLpm9LIGCiyzy6sXROeu3m2Tpmnp3XQ0Th3qGMrpRdhdThnzgCtt18Yn5zzrHCyAsbo+jpYViFa9wqqom5doTh5Cut8xGL5I3D24/c7N9dg3Qqw9gY8ib6l2K+cdCyP+I0xwDWJnjVU5/I765uS+CKdD7Mram6GZ0yljgPWgj/Q3dnKbar8ZUNLWnF18hPJzn6sfueMulu28QNQ4KO01p67xQs7GulRrr9qONpxn+cF3qbvDqqz5AcLjQQZ0u66qO1z3e9f4gd8gaWjf5eR9EwH3TUfnbrddv8mJNZzh5bI7S13gfgKwTudbB/l7uru09scgPJ6hQdtR5qNYqBeLULZ8f5yujcMY1ZVsds+7JXBPmGtsPf9RSvuqKdP3DpJbMjMVT93pW/4kYDwLfNYDy+O8L+f5KSeeuEL6j4OGP8OlbtOic9+MudoSbZ4+4c7vsqppCOHFdU5bCQtZfsRQW0C/dgATuNf35Jl1YaMlHNPUywdtcYBv02RqZmWsGUZC5wQHUt+d0N0pvL+bfviGtkpujc5bu4+OlD5I368n/g1dWbMQIfNlmO/BTn3iWpj1nlIv7W6O37TmGDM+ROM3gRtCw9s/xQLngFZpp601uFMj+fsfLmi7BIY4Xfe411sVw06HJm4j/qtOYC/u6XOiVoQrhpV/D3CPUJa/i1ObuIL3CH610o7GvxMdcer3LdsapROJ00v88uEImRN0VewG4G00tF1Vd4u4deibn+thMZQpuTRMz7DjiR+xwHIcZeXdYvL9WB4vCI85eljdubR3MXjZE/XuATYz7wjtnAez/iW4Xm2VDzsKYX6tMGQUmKfB8CfQo8pVUelJlNTX1L1mk4LOu6O9TMovUdlfSDUSshUlP/fbO+hCdTACAiUpcSXpxkNnbU6scQ59cWT0prWXgfsLZSzTls8GrveEKBHANZ25+ltRPLsp6gsItdviN60aq8uGjQZX14nwkEnzzNTGe/w1qf0Ct+8eS3kP6+Exq9SL7CZeDR0p+wx9LFYSNsnMrZIFW5XJVDjoI3Rnycm9mVXLd/c71i+QwwiV40gdwgw2EfCgrB+vx/xqMh/GFwn6upZqT+2WntJ0p1nBILfoDZwA3xrIZec0HZuc0rg66O/7ia6JnwhminWxcYlIiTNw07yOpXEQfwd6LalVDJrzie8Nejt60M3yfSYzE2jrkmHFYVZ/zVzusyit/ErQ3fFT8k0NujccBqHfC3W+5Lemv46Wj+Wg/iJ7WnI5NYJB1uFvbH3F3Kqo9ASIVlnFRSOkPAlot71BV4/Klpccm8mm9od7S/Rv0IiP8dpSbzKY9+CO71VuW+qbgWPPpHOfDLI94p/7LPWM5X2H1M+Cg1NUoC8pdt3LYPTQtndBoK1d2UvTp/1gBt6n87iKWEatWFYdfkdbZ135hgQa6dWj7zcbNkXoRDkp18UUNO4DOdhy3cS6temJCbGyvmEVl8IerT0zUxpOdNvS84B+GndqN/nt7Shk6ixgPtTv2gA/UfdlpjR+A60fv3OwK5tUg6Cbjagq+E56SsNEv3PjdOUUHe
JZwU4MdqVsbEJCgSl3AtNc8LSegbyQPstw+GKU39NzxT3nsx8KWNQFDOgyv1uUY+tAfM+TjEWXYp5JarFUM8w3hiYcBT+H+j3tS+QYFiwb3lsp33YTdm2ilN9pAF/optaegTXzIG2LAP86riI/FndTE9r2gfTbKaq38zu6su4g2+rfM3Bd8K1/mZmUmAID/hECpT62wydrpN6QKZrf8gFSgB9wCVgETVVr0HrVWhr6JRjh0dR/ZWmQuBAauVxX1OxP/XdyQGXat7yZtlbn0aeMZ+t8isi6Lck1mUmNR6QnNeyLVcOGW32yKXdmON7fEVwNf0uJ9SK26nGcqVfJrx/3q/7Zfm/HE7i+L0ZpaPa7Oo/QfvZOkr1MCSNsuUKC++kp5QrahiKll/m93RwwZB+eFlrMJJO83AULKBt09qpsDoUvUFkO1nxZaBvaPC7o71ZekdqX8o/j+P/riD8R+pgo6UkrBKBG96/Ij6v9mc+QEFjXBxvbjvUs/xbahIJiRXXgD6MfMMCt6XgjJgc97XeAt1jEipxk7ksK7C8x9o5HMbyVEqrXfLqJe4RMaSK06ymjZWV2HUo1lIALcFh3ez+wZSmPfkKtyiQ3QIeHYKEw7tNvQEMXku9bbJa+zfeLL6LXLnDiDU0QzNekLEqeI/3N9w95jkMmCDAhJS28QW1fXMsqv0CnJjU80XLKDpkS5T7CqdWB51jj493pfaERhQV1h6TdNHdmHj78Rx5pHz79+6Y07izxB6JlyGD2uttPgvF0QSwQjvGpv9t/+L4l/rNeml7GilQ5q6TMgSFpBsmv6JxdU6c0vCrzPGYiO/Tr/qNgFRiKbznX+B3roGFrUeqEhlXCbIAkBn29+E4VwctoJw1gD7cEFp1MOuYD0eC6EIocp9rcDW+p5RIFc30AIp9tF2mW6fkXKR1cx6AYjRkLMdtySEsZxJ2UtDD0CtGsKTA/eIjTlC0b751utzRauV1NkINn+BhM7wRX7oRnYJ9pue4twL2HId4ggPPqfaBjV0cqn6Sy61h/sFfWKa4WjRuXYJwa48S7nm39WitnHgX/kcm9P9Iyucx0uaEjCD/y12WmrQz8ddIvvgf7Isi5aGJFJW5M7gFz351LvjLpXFyOJyG7v4QDFUsQXglFm2xtFXBkucDaJWkoFI5t5ZiXkJN78QxyEakJwTKBkXfD5REGWqhf4U9mkjof8AKqMp53goHw059WlKt4CEHdwcNN4KgWxSPrlduvmuR5i9NY4U5WPQBdYZf7WA7qU7xfKGmohXYF1S0Tt3vJW5/5BkxjD8bM0wiWS4FlBzILHMiS4BPcy8M8fOGOd0stU2bNirU9SdAAlFiCFObbuOCUg7oqz8aqFKYVBnyRBNM2Gatd1W6E5zr+crRDlJL9elX6UXLsDbxiDQitYFLYV1Dsrfx+Alw+QBFxmN8y3ofhVfLkhdY7tcnAN3Uld+K8OAkc7u33Uo3W342oyEPQWg8KBcJZ7a20f01gFc2iDo4oCrK+cm05IDPQwXOSlfIrUCZUblhRgRYtOZMP4gVroqC4Ih0tOboCOiR9YFUGWfY6e6qcNBJv+sV3vLfEA+Bb2gi1FVwqJunRzA2NBdofDb6vtn39M/q6ipJwZ5tLZ2QcGAZNHDiHrrBC5SoAFkPk4y39F0pyzFiVluRDC79iTd0xrD+wh+KwUQ0vQfAUA3EGoEuIf6OQlqQBPMAfTVvow2CMrXMPEDcRxeY+cMDVEkE5xeTHL6NKMgSWt17ol7tshFeolatWmgUCWHz7Mb2wm4wborFEgiPIPwX+vDw5tdHwCySIodGw/g/7vU0FiKlU/PwywYaJhOY3n7mcMQB6FwzABlkMws0m6T4snB9lOhCHv9QptnHTFCHtl6q+7gPQJM40HY21sMX7PD4IWlm5QSjt7e1n4MgA6DBZBHfKWg5hHPhOEdZ4+MtyIRo2L1bKJOuIhauqGSy4O3RPmM7zs+sM8Sj1GYnj9NnvYQLPC9zgep
hQrWiI2gruFG3dUV4fRCTakel/6sM6F8LLM0xTJj4gExcRKQXbIAnMsjAY5Mtqi3T1nY/7YS80pt2xIC43+SwLovenMfi+lT6+irGl/sKon2Mrb7hwSs5NhxmzngwtEU3wywIXTP/qoKhoMSygP89MKWmWCpYmxho8wcgX6ephyi8Kvmvq4MPzrEd8rRYHvvWCrhgeixVnjLuJCg8D7o3at1fRf4wT/pmA3i7QEPiQweL7ts7KpWGIpX1MEq0PIhakWG9jzdHawDAXCtkFiVPAtWVToIZfJUXrdLU/BVrZG2Z3AdNYC8kME2HyrzcrTDkfZsEwEIjJU829MvfAEH8EECDDvjufABbCeiR84V+iK1gV079nun05dZPP8n8FvxctWcB/0R4W46Yza0/JJ5YIZfCL04WRNnSmaZ9wW/oLD1yRI4I2OjcTM7Q89g7zHhyLObmJYZSoyPm4NsRtdjXpWG6m5kubbNs6GZqZQ9r/pbwIcQdihX2R0q9g/mIREHTSis/zPgz70McrzMOsAiwilAp9UN/aKgxcBHNKg1/HiUylvM/5zHXQtocRmrPB/yLiJoD8pDTJsXW/CEksaINPMIKF4avyjm5puATTl6T1GUtY0WUw0gG0HaAU5TGTAQ6klnWCZLI9fTzWoPR9r8yN1Dev3hHXW34BCN/UcwvJngWWI/ktQqcoUE4ftEX5rHEyAZmeV7rexGUPc8uNMdGBJ/Mm3WaV2UxqlYBbzq6PW7ka+2zzzEdx0DNdV3HUnrLvphwuMlOfDa1uMh1Ekj+BY1FYnhC8e37nUcTfQJP+yju6gV4g8IGQgYO5/qtYNK8A33flqgvjLmSO0m6KP2b53gEmrbaYI8EVZlknkk/aqIyCvJVueWjuIwjippEJGSbX26aOSIrbw+rrPlT1dz3PgC3i2G3pfFH1NiPkjwCS9yqSgWmYdQ5GrHVFbTHEt87rWn82g2SX9NSmR43LikFvLI/3KmVr4wtLYl27yMa3jY84MBqvFEennscpstX48VMIgTTMsQKV9QKsgcetihoRDOn+rIPPvZbEfrnkEZeO4BmXxnzuxfgeaZ63axJXQXxdNPR+XZ2o5aKll1GXLlUcwcLEYoz44Qw9U6+f84t0FWtR82d8SZGM46BUV8G0O3u152rNnAoo04YBUC4PKsrIfYH7N4AptQIKvpMj0XnFm0B9wa6oe5L4P+K22pvI39suGl8fx4npyL24RJ4XggfODSwpfV3XNN6r3dwnyVpC+0UTlnJEhxTty8LtdLefSd2iaxsuYLXQMv5ardKKTzB6p9RXrBf3EZO0/ouUt1TXJQ6h187D8MjYdSjD6K9SHMyelRBBhfnNJCzfdWhsNvN1vNQ/Ie8rzO0goIK5uGHvJ3691sW/J/6v0Mhe4KZM8gLW60x8ngUTG4ub9k9FunQNgvo5J9r4NPg4StiZrNjj8mNpAeFY+RDbKmR4dxRtF5WoF1omxd6QlzAoyrYaPOWu5X73r2kVWRsrbvhvXU4HqGA+4iWC773G9Z27vA2pp3RZ1V3A9TzuzxlYJHMz2dhz0EMVQiwv8NDSZYz5ttfo26WrlOMapjU6/ekC4wuKcZVtz7xDK/jvs4ZFv49r40bG6ZV0/A1WZfU1lP+s75Svou5fM9cxi84vJa6dtr5m1SYeoGM+gxDdQD9UEf8Sfx3S/9AIdEcQxilLTVltZFcMP5T3G3S3u5rFIIBpDaPPnqaM1YI7u6rhRfryUBbeQE+l04DrGZACAw0q3ZyqtaD1TbRpyRl0eRIx9bDYANxVynt7WDndgk2k0MADBe4I6EV2bT2Gjd9O/GRg3BNYZAnt6WL9oxblGFv3Y109jAARYYTyYr7/TFquSU4+C0VPoGFlCI8o45W56wKeA78MWbaj67UvYA5zbRCJPJNPr79MKRHjshNrA2WaW0zv81pT19vViYtI8zfGxhtObcM5fmvqWo5CegF4K6jP6mxJP8v7ddKfKEg/oJyZXHJ2Jn
Ev2bpqA+NjnmNzQGBxaTkDzdAk3yV2dUz1WSU5G9ecLPzhnqRMHNxaFdW3e2tTD+FN+YU0zSu1H+SrU3Doe97tErd0WUu+3fKwhaFAUFuY68Mml+V3sgySwEmpAviDssIIBnIuA5r7qxGuMBV6HdcWqIHY+JAf2ziYsikZocFyGgovogMcZLaCkNcFXevnRIrLrjITT1IzVsdHIjjCVpm5FJyk1YmOtq7UCRD26+YV/miY12Os6PgUytdk4gKvc/281qlNmLOyGiN5FGmPQIP9La4O1DO9UeKNqcp3etX1k2Lbn/Y47dzHXZe6omVK4wJpS2wD2jNMBeK6lnxfsOzs25YfwW+aD0Wues1dl57oB94zYZylcg/7LelJFcXBBmdjZ9CRcycGxcVP599rXJOqu2VK/BFW5wjXoC/dq7z21l3R3temR8XPj69Mn4UBsAuC+cxSlZ278uRRffHmt/diQO/Zn+tdaJfZd2gv8j2aWBG0po5nsu9ZJmaZF9F5mpSBx0v6iybTtkkNE9m0egcM6iswiw67t7MZH/hrLSTCRB/DYDsdzbbJa82cy9zDvTJRq1LpyZTwisAc9PUcbweWcVUFrj8L2JgRRwn0e1HGrDMRqnsxt3M9k9rXSHrivuTnst9GML3suO5pruWMkHjHVke5remv6EA/nnk7fmF8ZPJsDLmdmQv6KQz499Q71mtf/3bKa0IyEcK9QrJfg+CXO/e4b6cnYkItkWcJrKy5Aia2qGXyiDfq5rc2YOJ8G6Aava72E6UPY82rd/U62la2TW3oZLHKAW3dqTPAy27ehvSVYuXLGPM6274J4oybh765ze/vWdE2pWEJisOvEWmmnkg8a4QaTZ/htyVHic2CmHO8zvQrrFxD2+VulSmNp9CGh8DYYWiz8ys7u38j7h0mbsfgez+Nkmrc9cmpuEefQpBPVr1dj6EVYLl4R6i+nuug32fE2gKBiB20/5FqYtC9Xo7Ut4lh3YJX4dnFD7WdVHdptDl9Nt38Se5yuZgVgHdhCYzyc30/oG+eU24Oh27xF4uK1Nu0kzFi/1ngg4YXuZ1tq2uqOrrWSEQQnET70p7trtHJ5JSSdn9DtlqfzT4rg2+nxPub25I6ybGsFa6lR1h9nWfSXy/DBhyE7MOqh+UgPgtarOBiKS4MXjZ7DP0gY2QlnrHFeMb21dp9ye9qP97xlIEFSXiptvT8tqk7d9Y3t+yh3f5ziEsihKdi2oxOphLiMmINHVRIwOI+lUUmi1CUDoekffD4B/D4sLzDaC3hVI+/gqeLaNORXtfG5tZJjc/z5vnYzWuxgKwJlH2n29Vz9fozd+xg3J0Ikg0NIeAXM97X4q72kyc1LGlcsGY719WTGDnb44K8ivGwQAk3wTPUOiHaxTg6irE6vPXk7QrW4v6iyG9VoB3/jEBTruNu6QFrnaPNmSNZ0nIabw5ios4W8xNfNsBgfooWm3evGOZhnrcczDwDYrSQX37baMv4FsrkEERq6X8JLWdeX09uXvsZIzeY4kVjEq13FiT10QfBvSEsU5VhlKKtyPp6LLiB4e/Fhe/Dtf5h/jBevmUgf1Bb5NiFwiYnk1XKkZBn4ObnZh8fpszNMhQePiifKBrhMsLBbXkvmAQvZmnooP4a3Cbx/oieL2EAHLJizIolHnBb1+wu8w8FSM1Ae1cfyMvBcG3KUPgxoOzBr8zz4PzSdzNnQu0F2N4FN7nC/pUCBuc3hQ76GIhH4Ilvn7xM+8W/TU6pXzKw7YNy5cuezaj7e/TyHgqVaLggtRL3yk6byvsgHGxKOOjHh8k3eBx8ED4GlYnAewTrfn8WmiyipYcx+vpKrf54j1UyAWWiAuE3xyi8y2bjcjRWyCAgBzwOhOXv9duApOanwCoh7GvzkHcfiYtcYAOeseAyXniVp1V5GNwnA9s9EA6T9lFHXFebygh/hLgYmHfguAjTbeF3XtvbwkxbnpwBYjbDgMSCIGmZHFtMOYtlBylS8msoSU
fyzOTh8CIkLZ7TfnPqLdaJTFThjyduIPL/XodInChbqG982FgYliqKaFn9Y4RGtm+ln+2/T/veXRlxU4XBCA40xHdvWApTfBTfNIiAnmUuq5I2ShDhIUxAdhFIkF3OIWPFQhk3NqqXqrGe2VU9ttVMxpl0ghtp/2PKHvf4Mr1UdmDLnIrEye5eCSIcZRXX45Q5liEkQeo1aRDw32KXc4hjITg50TYU+sJEwl3Js4BZ5nGMm5J4Fl5tKlsmjY/ldNuwTlGK8+Vy+VOhXexMN8uRQ7dO2EZ5L+4941400OU/3oGJnc8FuGVnbihoDawFF1HqMWHIhXYPaFOI54JrVfraVd4bViY5B5eAseTM7uVj8dkLThiY49Q4e2l9YSc7swym3LAuwaW4aeQYeNnRHOJ7MN4GNIP8+T6SPPld1mjps/ICS3afC9xh+ZJPdjxLH4ZlWlj0YV9tqhM4RDiG/SPtywsRvzrxZqXllv46eXL9KpMvVBRMP7ATOwyyyzpPf0KTFjRpK2hIJXqDQt8U6aEPAAACoklEQVTlNoNL8oE//EA3YPWZuQmzYXL9GlG+Nmde0KzZxR/WFdLKe+Bu3NhlztJl4BwN3dDqeNpncMyudGlbiAuhS8GnocWQ5sCrxEv7JEh6mMG42xXzmFY2MzJxcPytJHtH9BdB2c9tW81dedKovsTc5DOyWCafh3op1NSjljmmX2W8hONGvgUmQyPUI7vpw34L08tCF4mXvgvDJhoWfIzPjz1odalamh/3QbCAyHpJbjZnCw5CXBochnlmS93siC+Mx8E4EeHxLhoC3lAICRyCn8HwhXBu4Xe+IVuY6R9PDlObJdfSFo6KKBRIR37SK7LG08T9oB8mg62RuJqKxdUE0kwqI1yMeznPf4XezX+zQELSMEZZfWNcY8LWWA4IR3lSWf4jVdGGZ2XJa6G6sKNA+CANNkww9P0fjAFoMLRK/oNbaZomDGVb07gIspApmhHIwPs4h83hfQfSzXDzL6KJ94LtHSg/tr/yXPlfBZ4gLjzHaKC0FniQktGelu1Zj7gjImEHDJCR0Gkc3zRL1LglkHkMtCpxv8iOUxzZsrInaGMt/CpbBewo9lcElcVviM9vs+aJNN9vPKvW0Cq29aDarKKhh489BoT+sNgKGuXHmwFuFTJp3+0cHxJaaltVxvtk2kzTf590H5dXofUSwmPOtxKLTdzG/+I7YQQGWXBhrKYQwKHvLcOACARxJ4m7YVsF6Rgp0xA7A2ooDGFgCANDGBjCwDbBwLZj1NsCnLyPNj+BbawT8Wsvk+tI1dIN+GNFYzATXEw8DnQ/SFo5qE38yH9T1rjqZZZJLz77vETP+xm3BYxDZQxhYAgDQxgYwsC/IwYQFCIswj9ZoRL+NisK/h3bNATzEAaGMDCEgSEMDGFgCANDGBjCwBAG/h9h4P8A1o75+m6cd4AAAAAASUVORK5CYII=) Object detection: Bounding box regression with Keras, TensorFlow, and Deep Learning by [PyImageSearch.com](http://www.pyimagesearch.com) Welcome to **[PyImageSearch Plus](http://pyimg.co/plus)** Jupyter Notebooks!This notebook is associated with the [Object detection: Bounding box regression with Keras, TensorFlow, and Deep Learning](http://pyimg.co/gk8s6) blog post published on 10-05-20.Only the code for the blog post is here. 
Most codeblocks have a 1:1 relationship with what you find in the blog post with two exceptions: (1) Python classes are not separate files as they are typically organized with PyImageSearch projects, and (2) Command Line Argument parsing is replaced with an `args` dictionary that you can manipulate as needed. We recommend that you execute (press ▶️) the code block-by-block, as-is, before adjusting parameters and `args` inputs. Once you've verified that the code is working, you are welcome to hack with it and learn from manipulating inputs, settings, and parameters. For more information on using Jupyter and Colab, please refer to these resources: * [Jupyter Notebook User Interface](https://jupyter-notebook.readthedocs.io/en/stable/notebook.html#notebook-user-interface) * [Overview of Google Colaboratory Features](https://colab.research.google.com/notebooks/basic_features_overview.ipynb) As a reminder, these PyImageSearch Plus Jupyter Notebooks are not for sharing; please refer to the **Copyright** directly below and **Code License Agreement** in the last cell of this notebook. Happy hacking! *Adrian* ***Copyright:*** *The contents of this Jupyter Notebook, unless otherwise indicated, are Copyright 2020 Adrian Rosebrock, PyImageSearch.com. All rights reserved. Content like this is made possible by the time invested by the authors. 
If you received this Jupyter Notebook and did not purchase it, please consider making future content possible joining PyImageSearch Plus at [http://pyimg.co/plus/](http://pyimg.co/plus) today.* Install the necessary packages ###Code !pip install tensorflow==2.2.0 ###Output _____no_output_____ ###Markdown Download the code zip file ###Code !wget https://s3-us-west-2.amazonaws.com/static.pyimagesearch.com/bounding-box-regression/bounding-box-regression.zip !unzip -qq bounding-box-regression.zip %cd bounding-box-regression ###Output _____no_output_____ ###Markdown Blog Post Code Import Packages ###Code # import the necessary packages from tensorflow.keras.applications import VGG16 from tensorflow.keras.layers import Flatten from tensorflow.keras.layers import Dense from tensorflow.keras.layers import Input from tensorflow.keras.models import Model from tensorflow.keras.optimizers import Adam from tensorflow.keras.preprocessing.image import img_to_array from tensorflow.keras.preprocessing.image import load_img from tensorflow.keras.models import load_model from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import numpy as np import mimetypes import argparse import imutils import cv2 import os ###Output _____no_output_____ ###Markdown Function to display images in Jupyter Notebooks and Google Colab ###Code def plt_imshow(title, image): # convert the image frame BGR to RGB color space and display it image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) plt.imshow(image) plt.title(title) plt.grid(False) plt.show() ###Output _____no_output_____ ###Markdown Define our `Config` class ###Code class Config: # define the base path to the input dataset and then use it to derive # the path to the images directory and annotation CSV file BASE_PATH = "dataset" IMAGES_PATH = os.path.sep.join([BASE_PATH, "images"]) ANNOTS_PATH = os.path.sep.join([BASE_PATH, "airplanes.csv"]) # define the path to the base output directory BASE_OUTPUT = "output" # define the 
path to the output serialized model, model training plot, # and testing image filenames MODEL_PATH = os.path.sep.join([BASE_OUTPUT, "detector.h5"]) PLOT_PATH = os.path.sep.join([BASE_OUTPUT, "plot.png"]) TEST_FILENAMES = os.path.sep.join([BASE_OUTPUT, "test_images.txt"]) # initialize our initial learning rate, number of epochs to train # for, and the batch size INIT_LR = 1e-4 NUM_EPOCHS = 25 BATCH_SIZE = 32 # instantiate the config class config = Config() ###Output _____no_output_____ ###Markdown Implementing our bounding box regression training script with Keras and TensorFlow ###Code # load the contents of the CSV annotations file print("[INFO] loading dataset...") rows = open(config.ANNOTS_PATH).read().strip().split("\n") # initialize the list of data (images), our target output predictions # (bounding box coordinates), along with the filenames of the # individual images data = [] targets = [] filenames = [] # loop over the rows for row in rows: # break the row into the filename and bounding box coordinates row = row.split(",") (filename, startX, startY, endX, endY) = row # derive the path to the input image, load the image (in OpenCV # format), and grab its dimensions imagePath = os.path.sep.join([config.IMAGES_PATH, filename]) image = cv2.imread(imagePath) (h, w) = image.shape[:2] # scale the bounding box coordinates relative to the spatial # dimensions of the input image startX = float(startX) / w startY = float(startY) / h endX = float(endX) / w endY = float(endY) / h # load the image and preprocess it image = load_img(imagePath, target_size=(224, 224)) image = img_to_array(image) # update our list of data, targets, and filenames data.append(image) targets.append((startX, startY, endX, endY)) filenames.append(filename) # convert the data and targets to NumPy arrays, scaling the input # pixel intensities from the range [0, 255] to [0, 1] data = np.array(data, dtype="float32") / 255.0 targets = np.array(targets, dtype="float32") # partition the data into 
training and testing splits using 90% of # the data for training and the remaining 10% for testing split = train_test_split(data, targets, filenames, test_size=0.10, random_state=42) # unpack the data split (trainImages, testImages) = split[:2] (trainTargets, testTargets) = split[2:4] (trainFilenames, testFilenames) = split[4:] # write the testing filenames to disk so that we can use then # when evaluating/testing our bounding box regressor print("[INFO] saving testing filenames...") f = open(config.TEST_FILENAMES, "w") f.write("\n".join(testFilenames)) f.close() # load the VGG16 network, ensuring the head FC layers are left off vgg = VGG16(weights="imagenet", include_top=False, input_tensor=Input(shape=(224, 224, 3))) # freeze all VGG layers so they will *not* be updated during the # training process vgg.trainable = False # flatten the max-pooling output of VGG flatten = vgg.output flatten = Flatten()(flatten) # construct a fully-connected layer header to output the predicted # bounding box coordinates bboxHead = Dense(128, activation="relu")(flatten) bboxHead = Dense(64, activation="relu")(bboxHead) bboxHead = Dense(32, activation="relu")(bboxHead) bboxHead = Dense(4, activation="sigmoid")(bboxHead) # construct the model we will fine-tune for bounding box regression model = Model(inputs=vgg.input, outputs=bboxHead) # initialize the optimizer, compile the model, and show the model # summary opt = Adam(lr=config.INIT_LR) model.compile(loss="mse", optimizer=opt) print(model.summary()) # train the network for bounding box regression print("[INFO] training bounding box regressor...") H = model.fit( trainImages, trainTargets, validation_data=(testImages, testTargets), batch_size=config.BATCH_SIZE, epochs=config.NUM_EPOCHS, verbose=1) # serialize the model to disk print("[INFO] saving object detector model...") model.save(config.MODEL_PATH, save_format="h5") # plot the model training history N = config.NUM_EPOCHS plt.style.use("ggplot") plt.figure() 
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss") plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss") plt.title("Bounding Box Regression Loss on Training Set") plt.xlabel("Epoch #") plt.ylabel("Loss") plt.legend(loc="lower left") plt.show() ###Output _____no_output_____ ###Markdown Implementing our bounding box predictor with Keras and TensorFlow ###Code # # construct the argument parser and parse the arguments # ap = argparse.ArgumentParser() # ap.add_argument("-i", "--input", required=True, # help="path to input image/text file of image filenames") # args = vars(ap.parse_args()) # since we are using Jupyter Notebooks we can replace our argument # parsing code with *hard coded* arguments and values args = { "input": "output/test_images.txt" } # determine the input file type, but assume that we're working with # single input image filetype = mimetypes.guess_type(args["input"])[0] imagePaths = [args["input"]] # if the file type is a text file, then we need to process *multiple* # images if "text/plain" == filetype: # load the filenames in our testing file and initialize our list # of image paths filenames = open(args["input"]).read().strip().split("\n") imagePaths = [] # loop over the filenames for f in filenames: # construct the full path to the image filename and then # update our image paths list p = os.path.sep.join([config.IMAGES_PATH, f]) imagePaths.append(p) # load our trained bounding box regressor from disk print("[INFO] loading object detector...") model = load_model(config.MODEL_PATH) # loop over the images that we'll be testing using our bounding box # regression model for imagePath in imagePaths: # load the input image (in Keras format) from disk and preprocess # it, scaling the pixel intensities to the range [0, 1] image = load_img(imagePath, target_size=(224, 224)) image = img_to_array(image) / 255.0 image = np.expand_dims(image, axis=0) # make bounding box predictions on the input image preds = model.predict(image)[0] 
(startX, startY, endX, endY) = preds # load the input image (in OpenCV format), resize it such that it # fits on our screen, and grab its dimensions image = cv2.imread(imagePath) image = imutils.resize(image, width=600) (h, w) = image.shape[:2] # scale the predicted bounding box coordinates based on the image # dimensions startX = int(startX * w) startY = int(startY * h) endX = int(endX * w) endY = int(endY * h) # draw the predicted bounding box on the image cv2.rectangle(image, (startX, startY), (endX, endY), (0, 255, 0), 2) # show the output image plt_imshow("Output", image) ###Output _____no_output_____
.ipynb_checkpoints/(200819) 3.-checkpoint.ipynb
###Markdown โ–  ํ…์„œ ํ”Œ๋กœ์šฐ๋ฅผ ํ™œ์šฉํ•œ ์‹ ๊ฒฝ๋ง ๊ตฌ์„ฑ ๋ณต์Šต 1. ํ…์„œํ”Œ๋กœ์šฐ 1.x ๋ฒ„์ „ ๊ธฐ๋ณธ๋ฌธ๋ฒ• ๊ทธ๋ž˜ํ”„ ์ƒ์„ฑ ์˜์—ญ -------------- ๊ทธ๋ž˜ํ”„ ์‹คํ–‰ ์˜์—ญ 2. ํ…์„œ ํ”Œ๋กœ์šฐ๋กœ ์ด์šฉํ•œ ๋‹จ์ธต ์‹ ๊ฒฝ๋ง 3. ํ…์„œ ํ”Œ๋กœ์šฐ๋ฅผ ์ด์šฉํ•œ ๋‹ค์ธต ์‹ ๊ฒฝ๋ง 4. ํ…์„œ ํ”Œ๋กœ์šฐ๋ฅผ ์ด์šฉํ•ด CNN์„ ์ด์šฉํ•œ ์‹ ๊ฒฝ๋ง MNIST ๋ฐ์ดํ„ฐ๋Š” load_mnist ํ•จ์ˆ˜๋งŒ์„ ์ด์šฉํ•ด์„œ ์‰ฝ๊ฒŒ ๋ฐ์ดํ„ฐ๋ฅผ ๋กœ๋“œํ•  ์ˆ˜ ์žˆ๊ฒŒ ๊ตฌ์„ฑํ–ˆ๋Š”๋ฐ ํ˜„์—…์—์„œ ์‹ ๊ฒฝ๋ง์„ ๊ตฌํ˜„ํ•  ๋•Œ๋Š” load_mnist์™€ ๊ฐ™์€ ํ•จ์ˆ˜๋ฅผ ์ง์ ‘ ์ƒ์„ฑํ•ด์•ผํ•˜๋ฏ€๋กœ ์‹ ๊ฒฝ๋ง์œผ๋กœ ๋ฐ์ดํ„ฐ๋ฅผ ๋กœ๋“œํ•˜๋Š” 4๊ฐœ ํ•จ์ˆ˜๋ฅผ ์ƒ์„ฑ 1. image_load 2. label_load 3. next_batch 4. shuffle_batch โ–  ์ดํŒŒ๋ฆฌ ๋ฐ์ดํ„ฐ๋ฅผ ๋กœ๋“œํ•  ์ˆ˜ ์žˆ๋Š” 4๊ฐœ์˜ ํ•จ์ˆ˜ ์ƒ์„ฑ 1. ์งˆ๋ณ‘ ์ดํŒŒ๋ฆฌ ์‚ฌ์ง„ ํ™•์ธ darknamer.exe ํ”„๋กœ๊ทธ๋žจ์„ ์ด์šฉํ•ด์„œ ์‚ฌ์ง„์ด๋ฆ„์„ ์ „๋ถ€ ์ˆซ์ž๋กœ ๋ณ€๊ฒฝ 1 ~ 10000๋ฒˆ์œผ๋กœ ์ด๋ฆ„ ๋ณ€๊ฒฝ 1-9500๋ฒˆ๊นŒ์ง€ trainํด๋”์— ๋„ฃ๊ณ  9500-10000๋ฒˆ๊นŒ์ง€ test ํด๋” ๋„ฃ๊ณ  2. ํ˜„์—…์—์„œ CNN์„ ํ™œ์šฉํ•ด์„œ ๊ฐ€์žฅ ๋งŽ์ด ์‘์šฉํ•˜๋Š” ๋ถ€๋ถ„: ๊ณต์žฅ์—์„œ ๋งŒ๋“  ์ œํ’ˆ์— ๋Œ€ํ•œ ๋ถˆ๋Ÿ‰ํ’ˆ ๋ถ„๋ฅ˜ --> ํ’ˆ์งˆ๊ด€๋ฆฌ์š”์› 3. 
256*256 ์‚ฌ์ด์ฆˆ์˜ ์ด๋ฏธ์ง€๋“ค์„ 32*32๋กœ ์ผ๊ด„ ์กฐ์ • D:/data/leafs/images/train D:/data/leafs/images/test train, test ๋ฐ์ดํ„ฐ ๋‘˜๋‹ค resize ###Code import cv2 import os import numpy as np path = "D:/data/leafs/images/train" file_list = os.listdir(path) for k in file_list: img = cv2.imread(path + '/' + k) width, height = img.shape[:2] resize_img = cv2.resize(img, (32 , 32), interpolation=cv2.INTER_CUBIC) cv2.imwrite('D:/data/leafs/images/train_resize/' + k, resize_img) import cv2 import os import numpy as np path = "D:/data/leafs/images/test" file_list = os.listdir(path) for k in file_list: img = cv2.imread(path + '/' + k) width, height = img.shape[:2] resize_img = cv2.resize(img, (32 , 32), interpolation=cv2.INTER_CUBIC) cv2.imwrite('D:/data/leafs/images/test_resize/' + k, resize_img) ###Output _____no_output_____ ###Markdown ๊ฑด๊ฐ•ํ•œ ์ดํŒŒ๋ฆฌ : train 1 ~ 9500 ์งˆ๋ณ‘ ์ดํŒŒ๋ฆฌ : train 9501 ~ 19000 test 1 ~ 500 test 501 ~ 1000 4. ํ›ˆ๋ จ ๋ฐ์ดํ„ฐ์˜ ๋ผ๋ฒจ๊ณผ ํ…Œ์ŠคํŠธ ๋ฐ์ดํ„ฐ์˜ ๋ผ๋ฒจ์„ csv๋กœ ์ƒ์„ฑํ•˜์‹œ์˜ค ###Code path = 'd:/data/leafs/images/train_label.csv' file = open(path, 'w') for _ in range(0, 9500): file.write(str(1) + '\n') for _ in range(0, 9500): file.write(str(0) + '\n') file.close() path = 'd:/data/leafs/images/test_label.csv' file = open(path, 'w') for _ in range(0, 500): file.write(str(1) + '\n') for _ in range(0, 500): file.write(str(0) + '\n') file.close() ###Output _____no_output_____ ###Markdown 5. cifar10 ๋ฐ์ดํ„ฐ๋ฅผ ์‹ ๊ฒฝ๋ง์— ๋กœ๋“œํ•˜๊ธฐ ์œ„ํ•ด์„œ ๋งŒ๋“  4๊ฐ€์ง€ ํ•จ์ˆ˜๊ฐ€ ์žˆ๋Š” loader2.py์˜ ๋‚ด์šฉ์„ ๋ณต์‚ฌํ•ด์„œ loader_leaf.py๋กœ ๋งŒ๋“  ํ›„ ์•ˆ์˜ ๋‚ด์šฉ์„ ์ดํŒŒ๋ฆฌ ๋ฐ์ดํ„ฐ๋ฅผ ๋กœ๋“œํ•  ์ˆ˜ ์žˆ๊ฒŒ๋” ์ˆ˜์ •ํ•˜์‹œ์˜ค image_load label_load next_batch shuffle_batch 6. 
์ดํŒŒ๋ฆฌ ๋ฐ์ดํ„ฐ๊ฐ€ ์ž˜ load ๋˜๋Š”์ง€ ํ™•์ธํ•˜์‹œ์˜ค ###Code import loader_leaf as ll train_image = 'd:/data/leafs/images/train_resize/' test_image = 'd:/data/leafs/images/test_resize/' train_label = 'd:/data/leafs/images/train_label.csv' test_label = 'd:/data/leafs/images/test_label.csv' print(ll.image_load(train_image).shape) print(ll.image_load(test_image).shape) print(ll.label_load(train_label).shape) print(ll.label_load(test_label).shape) ###Output (19000, 32, 32, 3) (1000, 32, 32, 3) (19000, 2) (1000, 2) ###Markdown โ–  ์‚ฌ์ง„์„ ๋ถ„๋ฅ˜ํ•  ์ˆ˜ ์žˆ๋Š” ์‹ ๊ฒฝ๋ง ๊ตฌ์„ฑ ํ™˜๊ฒฝ 1. ํ…์„œ ํ”Œ๋กœ์šฐ 1.x ๋ฒ„์ „์œผ๋กœ๋งŒ ์ˆ˜ํ–‰ํ•˜๋Š” ๋ฐฉ๋ฒ• 2. ํ…์„œ ํ”Œ๋กœ์šฐ 1.x ๋ฒ„์ „(1.14.0) + keras(2.3.1) ๋กœ ๊ตฌํ˜„ํ•˜๋Š” ๋ฐฉ๋ฒ• ---> ๊ตฌ๊ธ€์ด keras ์ธ์ˆ˜ 3. ํ…์„œ ํ”Œ๋กœ์šฐ 2.x ๋ฒ„์ „์œผ๋กœ ๊ตฌํ˜„ํ•˜๋Š” ๋ฐฉ๋ฒ• โ–  ์ดํŒŒ๋ฆฌ ๋ฐ์ดํ„ฐ๋ฅผ ๋ถ„๋ฅ˜ํ•˜๋Š” ์‹ ๊ฒฝ๋ง ์„ค๊ณ„๋„์˜ ํฐ ๊ทธ๋ฆผ โ€ป ๋ฌธ์ œ142. ์šฐ๋ฆฌ๊ฐ€ ๊ฐ€์ง€๊ณ  ์žˆ๋Š” ์ฝ”๋“œ๋ฅผ ํ™œ์šฉํ•ด์„œ ์•„๋ž˜์˜ ๊ทธ๋ฆผ์˜ ์‹ ๊ฒฝ๋ง์œผ๋กœ ์ดํŒŒ๋ฆฌ ๋ฐ์ดํ„ฐ๋ฅผ ๋กœ๋“œํ•˜๋Š” keras ์‹ ๊ฒฝ๋ง์„ ๊ตฌํ˜„ํ•˜์‹œ์˜ค![q1](http://cfile260.uf.daum.net/image/99A8AF365F3CADFE10C6B8)![q2](http://cfile247.uf.daum.net/image/997D90365F3CADFF115580) ###Code from keras.datasets import cifar10 from keras.models import Sequential, save_model from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Conv2D, MaxPooling2D import numpy as np from keras.utils import np_utils from keras.layers.normalization import BatchNormalization import matplotlib.pyplot as plt import loader_leaf as loader3 import warnings import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' warnings.filterwarnings('ignore') plt.rcParams['figure.figsize'] = (20, 10) plt.rcParams.update({'font.size':20}) batch_size = 28 num_classes = 2 epochs = 15 train_image = 'D:\\data\\leafs\\images\\train_resize\\' test_image = 'D:\\data\\leafs\\images\\test_resize\\' train_label = 'D:\\data\\leafs\\images\\train_label.csv' test_label = 
'D:\\data\\leafs\\images\\test_label.csv'

# Load the leaf images and their one-hot labels via the custom loader module.
x_train = loader3.image_load(train_image)
y_train = loader3.label_load(train_label)
x_test = loader3.image_load(test_image)
y_test = loader3.label_load(test_label)

print(loader3.image_load(train_image).shape)
print(loader3.image_load(test_image).shape)
print(loader3.label_load(train_label).shape)
print(loader3.label_load(test_label).shape)

# (x_train, y_train), (x_test, y_test) = cifar10.load_data()

# One hot Encoding
# y_train = np_utils.to_categorical(y_train)
# y_test = np_utils.to_categorical(y_test)

# Two Conv -> BatchNorm -> ReLU -> MaxPool -> Dropout stages,
# then a 512-unit dense head and a softmax over the two classes.
model = Sequential()
model.add(Conv2D(32, (5, 5), padding='same', input_shape=x_train.shape[1:]))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(64, (5, 5), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(512))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Scale pixel intensities from [0, 255] down to [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

# FIX: Keras 2 renamed the Keras-1 keyword `nb_epoch` to `epochs`;
# the old name is deprecated and removed in current releases.
hist = model.fit(x_train, y_train,
                 validation_data=(x_test, y_test),
                 epochs=epochs,
                 batch_size=batch_size,
                 verbose=2)

scores = model.evaluate(x_test, y_test, verbose=0)  # verbose: progress display
print("CNN Error: %.2f%%" % (100 - scores[1] * 100))

print(hist.history)
'''
{'loss': [0.4628944396972656, 0.3074853718280792, 0.2499154955148697],
'accuracy': [0.7932631373405457, 0.8667894601821899, 0.8957894444465637],
'val_loss': [0.6377381682395935, 0.4691646695137024, 0.4328630864620209],
'val_accuracy': [0.6100000143051147, 0.7739999890327454, 0.8379999995231628]}
'''

# Plot the training and validation accuracy values.
plt.plot(hist.history['accuracy']) plt.plot(hist.history['val_accuracy']) plt.title('Model accuracy') plt.ylabel('Accuracy') plt.xlabel('Epoch') plt.legend(['Train', 'Test'], loc='upper left') plt.show() # ํ•™์Šต ์†์‹ค ๊ฐ’๊ณผ ๊ฒ€์ฆ ์†์‹ค ๊ฐ’์„ ํ”Œ๋กฏํŒ… ํ•ฉ๋‹ˆ๋‹ค. plt.plot(hist.history['loss']) plt.plot(hist.history['val_loss']) plt.title('Model loss') plt.ylabel('Loss') plt.xlabel('Epoch') plt.legend(['Train', 'Test'], loc='upper left') plt.show() ###Output (19000, 32, 32, 3) (1000, 32, 32, 3) (19000, 2) (1000, 2) Train on 19000 samples, validate on 1000 samples Epoch 1/15 - 149s - loss: 0.6557 - accuracy: 0.6829 - val_loss: 1.3807 - val_accuracy: 0.2730 Epoch 2/15 - 146s - loss: 0.5450 - accuracy: 0.7405 - val_loss: 1.6024 - val_accuracy: 0.4080 Epoch 3/15 - 142s - loss: 0.5213 - accuracy: 0.7615 - val_loss: 1.7125 - val_accuracy: 0.2850 Epoch 4/15 - 141s - loss: 0.5056 - accuracy: 0.7735 - val_loss: 1.5888 - val_accuracy: 0.2390 Epoch 5/15 - 147s - loss: 0.4909 - accuracy: 0.7856 - val_loss: 1.9389 - val_accuracy: 0.2410 Epoch 6/15 - 150s - loss: 0.4790 - accuracy: 0.7962 - val_loss: 1.6832 - val_accuracy: 0.2100 Epoch 7/15 - 169s - loss: 0.4707 - accuracy: 0.8010 - val_loss: 1.7175 - val_accuracy: 0.1850 Epoch 8/15 - 154s - loss: 0.4602 - accuracy: 0.8114 - val_loss: 1.7233 - val_accuracy: 0.1970 Epoch 9/15 - 148s - loss: 0.4509 - accuracy: 0.8146 - val_loss: 1.6784 - val_accuracy: 0.1590 Epoch 10/15 - 149s - loss: 0.4377 - accuracy: 0.8251 - val_loss: 1.7354 - val_accuracy: 0.1800 Epoch 11/15 - 148s - loss: 0.4357 - accuracy: 0.8263 - val_loss: 1.6926 - val_accuracy: 0.2200 Epoch 12/15 - 143s - loss: 0.4254 - accuracy: 0.8315 - val_loss: 1.8305 - val_accuracy: 0.1570 Epoch 13/15 - 142s - loss: 0.4189 - accuracy: 0.8341 - val_loss: 1.6726 - val_accuracy: 0.1540 Epoch 14/15 - 140s - loss: 0.4077 - accuracy: 0.8401 - val_loss: 1.9250 - val_accuracy: 0.1600 Epoch 15/15 - 137s - loss: 0.3973 - accuracy: 0.8465 - val_loss: 1.8647 - val_accuracy: 0.1490 
###Markdown โ–  tensorflow 2.x ํ…์„œ ํ”Œ๋กœ์šฐ 2.x์˜ ๊ฐ€์žฅ ํฐ ์žฅ์  ์ค‘ ํ•˜๋‚˜๋Š” ์ฆ‰์‹œ ์‹คํ–‰๋ชจ๋“œ๋ฅผ ์ง€์› ํ…์„œ ํ”Œ๋กœ์šฐ 1.x์—์„œ๋Š” ๊ณ„์‚ฐ ๊ทธ๋ž˜ํ”„๋ฅผ ์„ ์–ธํ•˜๊ณ  ์ดˆ๊ธฐํ™” ํ•œ ํ›„์— ์„ธ์…˜์„ ํ†ตํ•ด ๊ฐ’์„ ํ๋ฅด๊ฒŒ ํ•˜๋Š” ๋“ฑ์˜ ๋งŽ์€ ์ž‘์—…์„ ํ•„์š” ์ฆ‰์‹œ ์‹คํ–‰๋ชจ๋“œ๋ฅผ ํ†ตํ•ด ํ…์„œ ํ”Œ๋กœ์šฐ๋ฅผ ํŒŒ์ด์ฌ์ฒ˜๋Ÿผ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ๊ฒŒ ๋˜์—ˆ๋‹ค๋Š”๊ฒŒ 2.x ๋ฒ„์ „์—์„œ ๊ฐ€์žฅ ํฌ๊ฒŒ ๋ณ€๊ฒฝ๋œ ์‚ฌํ•ญ ๋”ฅ๋Ÿฌ๋‹ ๋ฉด์ ‘๋ฌธ์ œ: ํ…์„œ ํ”Œ๋กœ์šฐ 1.x๋ฒ„์ „๊ณผ 2.x๋ฒ„์ „์˜ ๊ฐ€์žฅ ํฐ ์ฐจ์ด? ์ฆ‰์‹œ ์‹คํ–‰๋ชจ๋“œ๋ฅผ ํ†ตํ•ด ์ž‘์—… ๊ฒฐ๊ณผ๋ฅผ ๋ฐ”๋กœ ํ™•์ธํ•  ์ˆ˜ ์žˆ์Œ ์˜ˆ์ œ1. ์ฆ‰์‹œ ์‹คํ–‰๋ชจ๋“œ๋ฅผ ํ†ตํ•œ ์—ฐ์‚ฐ keras_study ๊ฐ€์ƒํ™˜๊ฒฝ์œผ๋กœ ์‹คํ–‰ ###Code import tensorflow as tf
import numpy as np

a = tf.constant(3)
b = tf.constant(2)

# Eager mode: each op runs immediately and returns a concrete tensor.
print(tf.add(a, b))
# FIX: the API is tf.subtract -- the original called the non-existent
# tf.substract, which raises AttributeError at runtime.
print(tf.subtract(a, b))
print(tf.multiply(a, b).numpy())
print(tf.divide(a, b).numpy())
###Output _____no_output_____ ###Markdown ์˜ˆ์ œ2. @tf.function ๊ธฐ๋Šฅ @tf.function์€ ํ…์„œ ํ”Œ๋กœ์šฐ์—์„œ ์ž๋™์œผ๋กœ ๊ทธ๋ž˜ํ”„๋ฅผ ์ƒ์„ฑํ•ด์ฃผ๋Š” ๊ธฐ๋Šฅ ํŒŒ์ด์ฌ์œผ๋กœ ๊ตฌ์„ฑ๋œ ์ฝ”๋“œ๋ฅผ ๊ณ ํšจ์œจ ํ…์„œ ํ”Œ๋กœ์šฐ ๊ทธ๋ž˜ํ”„๋กœ ๋ณ€ํ™˜ํ•ด์„œ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ๋‹ค ํ…์„œ ํ”Œ๋กœ์šฐ ๊ทธ๋ž˜ํ”„๋กœ ๋ณ€ํ™˜ํ•˜์—ฌ ์‚ฌ์šฉํ•œ๋‹ค๋Š” ๊ฒƒ์€ GPU ์—ฐ์‚ฐ์ด ๊ฐ€๋Šฅํ•˜๋‹ค๋Š” ์˜๋ฏธ์ด๊ธฐ ๋•Œ๋ฌธ์— ์†๋„์ธก๋ฉด์—์„œ ๊ต‰์žฅํ•œ ํšจ๊ณผ๋ฅผ ๋ณผ ์ˆ˜ ์žˆ๋‹ค. ###Code import tensorflow as tf
import numpy as np

@tf.function
def square_pos(x):
    """Return x*x when x is positive, otherwise -x (compiled to a TF graph)."""
    if x > 0:
        x *= x
    else:
        x *= -1
    return x

print(square_pos(tf.constant(2)))
###Output _____no_output_____ ###Markdown โ–  ํ…์„œ ํ”Œ๋กœ์šฐ 2.x ๋ฒ„์ „์œผ๋กœ ํผ์…‰ํŠธ๋ก  ๊ตฌํ˜„ํ•˜๊ธฐ ์ธ๊ณต ์‹ ๊ฒฝ ์„ธํฌ ํ•˜๋‚˜๋ฅผ ๊ตฌํ˜„ ํผ์…‰ํŠธ๋ก ์—์„œ ์“ฐ์ด๋Š” ์ž…๋ ฅ๋ฐ์ดํ„ฐ์™€ ํƒ€๊ฒŸ 4๊ฐ€์ง€ 1. AND ๊ฒŒ์ดํŠธ 2. OR ๊ฒŒ์ดํŠธ 3. Non AND ๊ฒŒ์ดํŠธ ๋‹จ์ธต ------------------------------- 4.
XOR ๊ฒŒ์ดํŠธ ๋‹ค์ธต โ–  AND ๊ฒŒ์ดํŠธ ###Code import tensorflow as tf from tensorflow.keras.models import Sequential # ์‹ ๊ฒฝ๋ง ๋ชจ๋ธ ๊ตฌ์„ฑ from tensorflow.keras.layers import Dense # ์™„์ „ ์—ฐ๊ฒฐ๊ณ„์ธต from tensorflow.keras.optimizers import SGD # ๊ฒฝ์‚ฌ ๊ฐ์†Œ๋ฒ• from tensorflow.keras.losses import mse # ์˜ค์ฐจํ•จ์ˆ˜์ˆ˜ # ๋ฐ์ดํ„ฐ ์ค€๋น„ x = np.array([[0,0],[1,0],[0,1],[1,1]]) y = np.array([[0],[0],[0],[1]]) # ๋ชจ๋ธ ๊ตฌ์„ฑํ•˜๊ธฐ model = Sequential() # ๋‹จ์ธต ํผ์…‰ํŠธ๋ก  ๊ตฌํ˜„ํ•˜๊ธฐ model.add(Dense(1, input_shape = (2,), activation = 'linear')) # ์ˆซ์ž๊ฐ€ ๋“ค์–ด์™€์„œ ์ˆซ์ž๋ฅผ ์˜ˆ์ธก # ๋ชจ๋ธ ์ค€๋น„ํ•˜๊ธฐ model.compile(optimizer = SGD(), loss = mse, metrics = ['acc']) # metrics: list ํ˜•ํƒœ๋กœ ํ‰๊ฐ€์ง€ํ‘œ๋ฅผ ์ „๋‹ฌ #ํ•™์Šต ์‹œํ‚ค๊ธฐ model.fit(x, y, epochs = 500) ###Output WARNING:tensorflow:From C:\Users\knitwill\anaconda3\lib\site-packages\tensorflow\python\ops\init_ops.py:1251: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version. 
Instructions for updating: Call initializer instance with the dtype argument instead of passing it to the constructor Epoch 1/500 4/4 [==============================] - 7s 2s/sample - loss: 0.1101 - acc: 0.7500 Epoch 2/500 4/4 [==============================] - 0s 500us/sample - loss: 0.1094 - acc: 0.7500 Epoch 3/500 4/4 [==============================] - 0s 500us/sample - loss: 0.1089 - acc: 0.7500 Epoch 4/500 4/4 [==============================] - 0s 250us/sample - loss: 0.1083 - acc: 0.7500 Epoch 5/500 4/4 [==============================] - 0s 250us/sample - loss: 0.1077 - acc: 0.7500 Epoch 6/500 4/4 [==============================] - 0s 12ms/sample - loss: 0.1072 - acc: 0.7500 Epoch 7/500 4/4 [==============================] - 0s 249us/sample - loss: 0.1067 - acc: 0.7500 Epoch 8/500 4/4 [==============================] - 0s 250us/sample - loss: 0.1062 - acc: 0.7500 Epoch 9/500 4/4 [==============================] - 0s 250us/sample - loss: 0.1057 - acc: 0.7500 Epoch 10/500 4/4 [==============================] - 0s 250us/sample - loss: 0.1052 - acc: 0.7500 Epoch 11/500 4/4 [==============================] - 0s 250us/sample - loss: 0.1048 - acc: 0.7500 Epoch 12/500 4/4 [==============================] - 0s 0s/sample - loss: 0.1043 - acc: 0.7500 Epoch 13/500 4/4 [==============================] - 0s 250us/sample - loss: 0.1039 - acc: 0.7500 Epoch 14/500 4/4 [==============================] - 0s 0s/sample - loss: 0.1035 - acc: 0.7500 Epoch 15/500 4/4 [==============================] - 0s 250us/sample - loss: 0.1030 - acc: 0.7500 Epoch 16/500 4/4 [==============================] - 0s 750us/sample - loss: 0.1026 - acc: 1.0000 Epoch 17/500 4/4 [==============================] - 0s 499us/sample - loss: 0.1023 - acc: 1.0000 Epoch 18/500 4/4 [==============================] - 0s 250us/sample - loss: 0.1019 - acc: 1.0000 Epoch 19/500 4/4 [==============================] - 0s 500us/sample - loss: 0.1015 - acc: 1.0000 Epoch 20/500 4/4 [==============================] - 0s 
250us/sample - loss: 0.1011 - acc: 1.0000 Epoch 21/500 4/4 [==============================] - 0s 2ms/sample - loss: 0.1008 - acc: 1.0000 Epoch 22/500 4/4 [==============================] - 0s 250us/sample - loss: 0.1004 - acc: 1.0000 Epoch 23/500 4/4 [==============================] - 0s 501us/sample - loss: 0.1001 - acc: 1.0000 Epoch 24/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0998 - acc: 1.0000 Epoch 25/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0994 - acc: 1.0000 Epoch 26/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0991 - acc: 1.0000 Epoch 27/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0988 - acc: 1.0000 Epoch 28/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0985 - acc: 1.0000 Epoch 29/500 4/4 [==============================] - 0s 500us/sample - loss: 0.0982 - acc: 1.0000 Epoch 30/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0979 - acc: 1.0000 Epoch 31/500 4/4 [==============================] - 0s 500us/sample - loss: 0.0976 - acc: 1.0000 Epoch 32/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0973 - acc: 1.0000 Epoch 33/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0970 - acc: 1.0000 Epoch 34/500 4/4 [==============================] - 0s 500us/sample - loss: 0.0968 - acc: 1.0000 Epoch 35/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0965 - acc: 1.0000 Epoch 36/500 4/4 [==============================] - 0s 499us/sample - loss: 0.0962 - acc: 1.0000 Epoch 37/500 4/4 [==============================] - 0s 500us/sample - loss: 0.0959 - acc: 1.0000 Epoch 38/500 4/4 [==============================] - 0s 500us/sample - loss: 0.0957 - acc: 1.0000 Epoch 39/500 4/4 [==============================] - 0s 498us/sample - loss: 0.0954 - acc: 1.0000 Epoch 40/500 4/4 [==============================] - 0s 500us/sample - loss: 0.0952 - acc: 1.0000 Epoch 41/500 4/4 
[==============================] - 0s 500us/sample - loss: 0.0949 - acc: 1.0000 Epoch 42/500 4/4 [==============================] - 0s 749us/sample - loss: 0.0947 - acc: 1.0000 Epoch 43/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0944 - acc: 1.0000 Epoch 44/500 4/4 [==============================] - 0s 501us/sample - loss: 0.0942 - acc: 1.0000 Epoch 45/500 4/4 [==============================] - 0s 497us/sample - loss: 0.0939 - acc: 1.0000 Epoch 46/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0937 - acc: 1.0000 Epoch 47/500 4/4 [==============================] - 0s 500us/sample - loss: 0.0935 - acc: 1.0000 Epoch 48/500 4/4 [==============================] - 0s 499us/sample - loss: 0.0932 - acc: 1.0000 Epoch 49/500 4/4 [==============================] - 0s 499us/sample - loss: 0.0930 - acc: 1.0000 Epoch 50/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0928 - acc: 1.0000 Epoch 51/500 4/4 [==============================] - 0s 500us/sample - loss: 0.0926 - acc: 1.0000 Epoch 52/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0923 - acc: 1.0000 Epoch 53/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0921 - acc: 1.0000 Epoch 54/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0919 - acc: 1.0000 Epoch 55/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0917 - acc: 1.0000 Epoch 56/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0915 - acc: 1.0000 Epoch 57/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0913 - acc: 1.0000 Epoch 58/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0910 - acc: 1.0000 Epoch 59/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0908 - acc: 1.0000 Epoch 60/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0906 - acc: 1.0000 Epoch 61/500 4/4 [==============================] - 0s 250us/sample - loss: 
0.0904 - acc: 1.0000 Epoch 62/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0902 - acc: 1.0000 Epoch 63/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0900 - acc: 1.0000 Epoch 64/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0898 - acc: 1.0000 Epoch 65/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0896 - acc: 1.0000 Epoch 66/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0894 - acc: 1.0000 Epoch 67/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0893 - acc: 1.0000 Epoch 68/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0891 - acc: 1.0000 Epoch 69/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0889 - acc: 1.0000 Epoch 70/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0887 - acc: 1.0000 Epoch 71/500 4/4 [==============================] - 0s 0s/sample - loss: 0.0885 - acc: 1.0000 Epoch 72/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0883 - acc: 1.0000 Epoch 73/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0881 - acc: 1.0000 Epoch 74/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0879 - acc: 1.0000 Epoch 75/500 4/4 [==============================] - 0s 252us/sample - loss: 0.0878 - acc: 1.0000 Epoch 76/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0876 - acc: 1.0000 Epoch 77/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0874 - acc: 1.0000 Epoch 78/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0872 - acc: 1.0000 Epoch 79/500 4/4 [==============================] - 0s 0s/sample - loss: 0.0871 - acc: 1.0000 Epoch 80/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0869 - acc: 1.0000 Epoch 81/500 4/4 [==============================] - 0s 250us/sample - loss: 0.0867 - acc: 1.0000 ###Markdown โ€ป ๋ฌธ์ œ143. 
AND ๊ฒŒ์ดํŠธ ํผ์…‰ํŠธ๋ก ์„ ๊ตฌํ˜„ํ•œ ์‹ ๊ฒฝ๋ง์—์„œ ๋งŒ๋“ค์–ด๋‚ธ ๊ฐ€์ค‘์น˜๋ฅผ ์ถœ๋ ฅํ•˜์‹œ์˜ค ###Code model.get_weights() ###Output _____no_output_____ ###Markdown โ€ป ๋ฌธ์ œ144. ์œ„์˜ AND ๊ฒŒ์ดํŠธ ํผ์…‰ํŠธ๋ก  ์‹ ๊ฒฝ๋ง์ด ์˜ˆ์ธกํ•œ ๊ฒฐ๊ณผ๋ฅผ ์ถœ๋ ฅํ•˜์‹œ์˜ค ###Code model.evaluate(x, y) result = model.predict(x) print(result) ###Output 4/4 [==============================] - 3s 698ms/sample - loss: 0.0640 - acc: 1.0000 [[-0.18919873] [ 0.27109194] [ 0.24799117] [ 0.7082819 ]] ###Markdown โ€ป ๋ฌธ์ œ145. for ๋ฌธ์„ ์ด์šฉํ•ด์„œ ์•„๋ž˜์˜ ๊ฒฐ๊ณผ์˜ ์š”์†Œ๋“ค์„ ๋ฝ‘์•„๋‚ด์‹œ์˜ค ###Code for i in result: for j in i: print(round(j)) ###Output -0.0 0.0 0.0 1.0 ###Markdown โ€ป ๋ฌธ์ œ146. OR ๊ฒŒ์ดํŠธ๋ฅผ ๊ตฌํ˜„ํ•˜์‹œ์˜ค ###Code import tensorflow as tf from tensorflow.keras.models import Sequential # ์‹ ๊ฒฝ๋ง ๋ชจ๋ธ ๊ตฌ์„ฑ from tensorflow.keras.layers import Dense # ์™„์ „ ์—ฐ๊ฒฐ๊ณ„์ธต from tensorflow.keras.optimizers import SGD # ๊ฒฝ์‚ฌ ๊ฐ์†Œ๋ฒ• from tensorflow.keras.losses import mse # ์˜ค์ฐจํ•จ์ˆ˜์ˆ˜ # ๋ฐ์ดํ„ฐ ์ค€๋น„ x = np.array([[0,0],[1,0],[0,1],[1,1]]) y = np.array([[0],[1],[1],[1]]) # ๋ชจ๋ธ ๊ตฌ์„ฑํ•˜๊ธฐ model = Sequential() # ๋‹จ์ธต ํผ์…‰ํŠธ๋ก  ๊ตฌํ˜„ํ•˜๊ธฐ model.add(Dense(1, input_shape = (2,), activation = 'linear')) # ๋ชจ๋ธ ์ค€๋น„ํ•˜๊ธฐ model.compile(optimizer = SGD(), loss = mse, metrics = ['acc']) #ํ•™์Šต ์‹œํ‚ค๊ธฐ model.fit(x, y, epochs = 500) model.evaluate(x, y) result = model.predict(x) for i in result: for j in i: print(round(j)) ###Output Epoch 1/500 4/4 [==============================] - 0s 95ms/sample - loss: 1.5322 - acc: 0.2500 Epoch 2/500 4/4 [==============================] - 0s 250us/sample - loss: 1.4493 - acc: 0.2500 Epoch 3/500 4/4 [==============================] - 0s 500us/sample - loss: 1.3715 - acc: 0.2500 Epoch 4/500 4/4 [==============================] - 0s 500us/sample - loss: 1.2985 - acc: 0.2500 Epoch 5/500 4/4 [==============================] - 0s 250us/sample - loss: 1.2301 - acc: 0.2500 Epoch 6/500 4/4 
[==============================] - 0s 500us/sample - loss: 1.1658 - acc: 0.2500 Epoch 7/500 4/4 [==============================] - 0s 500us/sample - loss: 1.1055 - acc: 0.2500 Epoch 8/500 4/4 [==============================] - 0s 500us/sample - loss: 1.0489 - acc: 0.2500 Epoch 9/500 4/4 [==============================] - 0s 250us/sample - loss: 0.9958 - acc: 0.2500 Epoch 10/500 4/4 [==============================] - 0s 250us/sample - loss: 0.9459 - acc: 0.2500 Epoch 11/500 4/4 [==============================] - 0s 500us/sample - loss: 0.8991 - acc: 0.2500 Epoch 12/500 4/4 [==============================] - 0s 500us/sample - loss: 0.8552 - acc: 0.2500 Epoch 13/500 4/4 [==============================] - 0s 500us/sample - loss: 0.8139 - acc: 0.2500 Epoch 14/500 4/4 [==============================] - 0s 250us/sample - loss: 0.7752 - acc: 0.5000 Epoch 15/500 4/4 [==============================] - 0s 499us/sample - loss: 0.7388 - acc: 0.5000 Epoch 16/500 4/4 [==============================] - 0s 500us/sample - loss: 0.7046 - acc: 0.5000 Epoch 17/500 4/4 [==============================] - 0s 250us/sample - loss: 0.6725 - acc: 0.5000 Epoch 18/500 4/4 [==============================] - 0s 250us/sample - loss: 0.6424 - acc: 0.5000 Epoch 19/500 4/4 [==============================] - 0s 250us/sample - loss: 0.6141 - acc: 0.5000 Epoch 20/500 4/4 [==============================] - 0s 250us/sample - loss: 0.5874 - acc: 0.5000 Epoch 21/500 4/4 [==============================] - 0s 499us/sample - loss: 0.5624 - acc: 0.5000 Epoch 22/500 4/4 [==============================] - 0s 250us/sample - loss: 0.5389 - acc: 0.5000 Epoch 23/500 4/4 [==============================] - 0s 250us/sample - loss: 0.5168 - acc: 0.5000 Epoch 24/500 4/4 [==============================] - 0s 250us/sample - loss: 0.4960 - acc: 0.5000 Epoch 25/500 4/4 [==============================] - 0s 500us/sample - loss: 0.4765 - acc: 0.5000 Epoch 26/500 4/4 [==============================] - 0s 250us/sample - loss: 
0.4581 - acc: 0.5000 Epoch 27/500 4/4 [==============================] - 0s 250us/sample - loss: 0.4408 - acc: 0.5000 Epoch 28/500 4/4 [==============================] - 0s 250us/sample - loss: 0.4246 - acc: 0.5000 Epoch 29/500 4/4 [==============================] - 0s 250us/sample - loss: 0.4093 - acc: 0.5000 Epoch 30/500 4/4 [==============================] - 0s 250us/sample - loss: 0.3949 - acc: 0.5000 Epoch 31/500 4/4 [==============================] - 0s 250us/sample - loss: 0.3813 - acc: 0.5000 Epoch 32/500 4/4 [==============================] - 0s 500us/sample - loss: 0.3685 - acc: 0.5000 Epoch 33/500 4/4 [==============================] - 0s 500us/sample - loss: 0.3565 - acc: 0.5000 Epoch 34/500 4/4 [==============================] - 0s 250us/sample - loss: 0.3452 - acc: 0.5000 Epoch 35/500 4/4 [==============================] - 0s 500us/sample - loss: 0.3345 - acc: 0.5000 Epoch 36/500 4/4 [==============================] - 0s 500us/sample - loss: 0.3244 - acc: 0.5000 Epoch 37/500 4/4 [==============================] - 0s 500us/sample - loss: 0.3149 - acc: 0.7500 Epoch 38/500 4/4 [==============================] - 0s 500us/sample - loss: 0.3060 - acc: 0.7500 Epoch 39/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2975 - acc: 0.7500 Epoch 40/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2896 - acc: 0.7500 Epoch 41/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2820 - acc: 0.7500 Epoch 42/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2749 - acc: 0.7500 Epoch 43/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2682 - acc: 0.7500 Epoch 44/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2618 - acc: 0.7500 Epoch 45/500 4/4 [==============================] - 0s 750us/sample - loss: 0.2558 - acc: 0.7500 Epoch 46/500 4/4 [==============================] - 0s 499us/sample - loss: 0.2501 - acc: 0.7500 Epoch 47/500 4/4 
[==============================] - 0s 499us/sample - loss: 0.2448 - acc: 0.7500 Epoch 48/500 4/4 [==============================] - 0s 499us/sample - loss: 0.2397 - acc: 0.7500 Epoch 49/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2348 - acc: 0.7500 Epoch 50/500 4/4 [==============================] - 0s 499us/sample - loss: 0.2303 - acc: 0.7500 Epoch 51/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2259 - acc: 0.7500 Epoch 52/500 4/4 [==============================] - 0s 749us/sample - loss: 0.2218 - acc: 0.7500 Epoch 53/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2179 - acc: 0.7500 Epoch 54/500 4/4 [==============================] - 0s 750us/sample - loss: 0.2142 - acc: 0.7500 Epoch 55/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2106 - acc: 0.5000 Epoch 56/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2073 - acc: 0.5000 Epoch 57/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2041 - acc: 0.5000 Epoch 58/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2010 - acc: 0.5000 Epoch 59/500 4/4 [==============================] - 0s 250us/sample - loss: 0.1982 - acc: 0.5000 Epoch 60/500 4/4 [==============================] - 0s 250us/sample - loss: 0.1954 - acc: 0.5000 Epoch 61/500 4/4 [==============================] - 0s 500us/sample - loss: 0.1927 - acc: 0.5000 Epoch 62/500 4/4 [==============================] - 0s 500us/sample - loss: 0.1902 - acc: 0.5000 Epoch 63/500 4/4 [==============================] - 0s 500us/sample - loss: 0.1878 - acc: 0.5000 Epoch 64/500 4/4 [==============================] - 0s 499us/sample - loss: 0.1855 - acc: 0.5000 Epoch 65/500 4/4 [==============================] - 0s 499us/sample - loss: 0.1833 - acc: 0.5000 Epoch 66/500 4/4 [==============================] - 0s 500us/sample - loss: 0.1812 - acc: 0.5000 Epoch 67/500 4/4 [==============================] - 0s 750us/sample - loss: 
0.1792 - acc: 0.5000 Epoch 68/500 4/4 [==============================] - 0s 500us/sample - loss: 0.1773 - acc: 0.5000 Epoch 69/500 4/4 [==============================] - 0s 749us/sample - loss: 0.1754 - acc: 0.5000 Epoch 70/500 4/4 [==============================] - 0s 749us/sample - loss: 0.1736 - acc: 0.5000 Epoch 71/500 4/4 [==============================] - 0s 750us/sample - loss: 0.1719 - acc: 0.5000 Epoch 72/500 4/4 [==============================] - 0s 750us/sample - loss: 0.1703 - acc: 0.5000 Epoch 73/500 4/4 [==============================] - 0s 500us/sample - loss: 0.1687 - acc: 0.5000 Epoch 74/500 4/4 [==============================] - 0s 499us/sample - loss: 0.1671 - acc: 0.5000 Epoch 75/500 4/4 [==============================] - 0s 750us/sample - loss: 0.1657 - acc: 0.5000 Epoch 76/500 4/4 [==============================] - 0s 500us/sample - loss: 0.1642 - acc: 0.5000 Epoch 77/500 4/4 [==============================] - 0s 500us/sample - loss: 0.1629 - acc: 0.5000 Epoch 78/500 4/4 [==============================] - 0s 499us/sample - loss: 0.1615 - acc: 0.5000 Epoch 79/500 4/4 [==============================] - 0s 500us/sample - loss: 0.1602 - acc: 0.5000 Epoch 80/500 4/4 [==============================] - 0s 500us/sample - loss: 0.1590 - acc: 0.5000 Epoch 81/500 4/4 [==============================] - 0s 500us/sample - loss: 0.1578 - acc: 0.5000 Epoch 82/500 4/4 [==============================] - 0s 500us/sample - loss: 0.1566 - acc: 0.5000 Epoch 83/500 4/4 [==============================] - 0s 500us/sample - loss: 0.1555 - acc: 0.5000 Epoch 84/500 4/4 [==============================] - 0s 749us/sample - loss: 0.1544 - acc: 0.5000 Epoch 85/500 4/4 [==============================] - 0s 750us/sample - loss: 0.1533 - acc: 0.5000 ###Markdown โ€ป ๋ฌธ์ œ147. 
NotAND ๊ฒŒ์ดํŠธ๋ฅผ ๊ตฌํ˜„ํ•˜์‹œ์˜ค ###Code import tensorflow as tf from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense from tensorflow.keras.optimizers import SGD from tensorflow.keras.losses import mse # ๋ฐ์ดํ„ฐ ์ค€๋น„ x = np.array([[0,0],[1,0],[0,1],[1,1]]) y = np.array([[1],[1],[1],[0]]) # ๋ชจ๋ธ ๊ตฌ์„ฑํ•˜๊ธฐ model = Sequential() # ๋‹จ์ธต ํผ์…‰ํŠธ๋ก  ๊ตฌํ˜„ํ•˜๊ธฐ model.add(Dense(1, input_shape = (2,), activation = 'linear')) # ๋ชจ๋ธ ์ค€๋น„ํ•˜๊ธฐ model.compile(optimizer = SGD(), loss = mse, metrics = ['acc']) #ํ•™์Šต ์‹œํ‚ค๊ธฐ model.fit(x, y, epochs = 500) model.evaluate(x, y) result = model.predict(x) for i in result: for j in i: print(round(j)) ###Output Epoch 1/500 4/4 [==============================] - 0s 26ms/sample - loss: 1.8562 - acc: 0.2500 Epoch 2/500 4/4 [==============================] - 0s 250us/sample - loss: 1.7615 - acc: 0.2500 Epoch 3/500 4/4 [==============================] - 0s 500us/sample - loss: 1.6726 - acc: 0.2500 Epoch 4/500 4/4 [==============================] - 0s 250us/sample - loss: 1.5892 - acc: 0.2500 Epoch 5/500 4/4 [==============================] - 0s 250us/sample - loss: 1.5108 - acc: 0.2500 Epoch 6/500 4/4 [==============================] - 0s 500us/sample - loss: 1.4372 - acc: 0.2500 Epoch 7/500 4/4 [==============================] - 0s 750us/sample - loss: 1.3680 - acc: 0.2500 Epoch 8/500 4/4 [==============================] - 0s 500us/sample - loss: 1.3030 - acc: 0.2500 Epoch 9/500 4/4 [==============================] - 0s 500us/sample - loss: 1.2420 - acc: 0.2500 Epoch 10/500 4/4 [==============================] - 0s 749us/sample - loss: 1.1846 - acc: 0.2500 Epoch 11/500 4/4 [==============================] - 0s 500us/sample - loss: 1.1307 - acc: 0.2500 Epoch 12/500 4/4 [==============================] - 0s 749us/sample - loss: 1.0800 - acc: 0.2500 Epoch 13/500 4/4 [==============================] - 0s 500us/sample - loss: 1.0324 - acc: 0.5000 Epoch 14/500 4/4 
[==============================] - 0s 499us/sample - loss: 0.9876 - acc: 0.5000 Epoch 15/500 4/4 [==============================] - 0s 749us/sample - loss: 0.9455 - acc: 0.5000 Epoch 16/500 4/4 [==============================] - 0s 499us/sample - loss: 0.9058 - acc: 0.5000 Epoch 17/500 4/4 [==============================] - 0s 500us/sample - loss: 0.8686 - acc: 0.5000 Epoch 18/500 4/4 [==============================] - 0s 250us/sample - loss: 0.8335 - acc: 0.5000 Epoch 19/500 4/4 [==============================] - 0s 750us/sample - loss: 0.8005 - acc: 0.5000 Epoch 20/500 4/4 [==============================] - 0s 250us/sample - loss: 0.7694 - acc: 0.5000 Epoch 21/500 4/4 [==============================] - 0s 250us/sample - loss: 0.7401 - acc: 0.5000 Epoch 22/500 4/4 [==============================] - 0s 250us/sample - loss: 0.7126 - acc: 0.5000 Epoch 23/500 4/4 [==============================] - 0s 500us/sample - loss: 0.6866 - acc: 0.5000 Epoch 24/500 4/4 [==============================] - 0s 250us/sample - loss: 0.6622 - acc: 0.5000 Epoch 25/500 4/4 [==============================] - 0s 250us/sample - loss: 0.6391 - acc: 0.5000 Epoch 26/500 4/4 [==============================] - 0s 500us/sample - loss: 0.6174 - acc: 0.5000 Epoch 27/500 4/4 [==============================] - 0s 500us/sample - loss: 0.5969 - acc: 0.5000 Epoch 28/500 4/4 [==============================] - 0s 750us/sample - loss: 0.5775 - acc: 0.5000 Epoch 29/500 4/4 [==============================] - 0s 500us/sample - loss: 0.5593 - acc: 0.5000 Epoch 30/500 4/4 [==============================] - 0s 500us/sample - loss: 0.5421 - acc: 0.5000 Epoch 31/500 4/4 [==============================] - 0s 500us/sample - loss: 0.5258 - acc: 0.7500 Epoch 32/500 4/4 [==============================] - 0s 500us/sample - loss: 0.5104 - acc: 0.7500 Epoch 33/500 4/4 [==============================] - 0s 250us/sample - loss: 0.4959 - acc: 0.7500 Epoch 34/500 4/4 [==============================] - 0s 500us/sample - loss: 
0.4822 - acc: 0.7500 Epoch 35/500 4/4 [==============================] - 0s 500us/sample - loss: 0.4692 - acc: 0.7500 Epoch 36/500 4/4 [==============================] - 0s 500us/sample - loss: 0.4569 - acc: 0.7500 Epoch 37/500 4/4 [==============================] - 0s 250us/sample - loss: 0.4453 - acc: 0.7500 Epoch 38/500 4/4 [==============================] - 0s 500us/sample - loss: 0.4343 - acc: 0.7500 Epoch 39/500 4/4 [==============================] - 0s 250us/sample - loss: 0.4238 - acc: 0.7500 Epoch 40/500 4/4 [==============================] - 0s 500us/sample - loss: 0.4139 - acc: 0.7500 Epoch 41/500 4/4 [==============================] - 0s 500us/sample - loss: 0.4046 - acc: 0.7500 Epoch 42/500 4/4 [==============================] - 0s 500us/sample - loss: 0.3956 - acc: 0.7500 Epoch 43/500 4/4 [==============================] - 0s 500us/sample - loss: 0.3872 - acc: 0.7500 Epoch 44/500 4/4 [==============================] - 0s 250us/sample - loss: 0.3792 - acc: 0.7500 Epoch 45/500 4/4 [==============================] - 0s 500us/sample - loss: 0.3715 - acc: 0.7500 Epoch 46/500 4/4 [==============================] - 0s 250us/sample - loss: 0.3643 - acc: 0.7500 Epoch 47/500 4/4 [==============================] - 0s 500us/sample - loss: 0.3573 - acc: 0.7500 Epoch 48/500 4/4 [==============================] - 0s 250us/sample - loss: 0.3508 - acc: 0.7500 Epoch 49/500 4/4 [==============================] - 0s 750us/sample - loss: 0.3445 - acc: 0.7500 Epoch 50/500 4/4 [==============================] - 0s 500us/sample - loss: 0.3385 - acc: 0.7500 Epoch 51/500 4/4 [==============================] - 0s 500us/sample - loss: 0.3328 - acc: 0.7500 Epoch 52/500 4/4 [==============================] - 0s 750us/sample - loss: 0.3273 - acc: 0.7500 Epoch 53/500 4/4 [==============================] - 0s 500us/sample - loss: 0.3221 - acc: 0.7500 Epoch 54/500 4/4 [==============================] - 0s 500us/sample - loss: 0.3172 - acc: 0.7500 Epoch 55/500 4/4 
[==============================] - 0s 749us/sample - loss: 0.3124 - acc: 0.7500 Epoch 56/500 4/4 [==============================] - 0s 500us/sample - loss: 0.3078 - acc: 0.7500 Epoch 57/500 4/4 [==============================] - 0s 250us/sample - loss: 0.3035 - acc: 0.7500 Epoch 58/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2993 - acc: 0.7500 Epoch 59/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2952 - acc: 0.7500 Epoch 60/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2914 - acc: 0.7500 Epoch 61/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2877 - acc: 0.7500 Epoch 62/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2841 - acc: 0.7500 Epoch 63/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2807 - acc: 0.7500 Epoch 64/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2773 - acc: 0.7500 Epoch 65/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2741 - acc: 0.7500 Epoch 66/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2711 - acc: 0.7500 Epoch 67/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2681 - acc: 0.7500 Epoch 68/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2652 - acc: 0.7500 Epoch 69/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2624 - acc: 0.7500 Epoch 70/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2597 - acc: 0.7500 Epoch 71/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2571 - acc: 0.7500 Epoch 72/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2546 - acc: 0.7500 Epoch 73/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2521 - acc: 0.7500 Epoch 74/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2498 - acc: 0.7500 Epoch 75/500 4/4 [==============================] - 0s 500us/sample - loss: 
0.2474 - acc: 0.7500 Epoch 76/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2452 - acc: 0.7500 Epoch 77/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2430 - acc: 0.7500 Epoch 78/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2409 - acc: 0.7500 Epoch 79/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2388 - acc: 0.7500 Epoch 80/500 4/4 [==============================] - 0s 249us/sample - loss: 0.2368 - acc: 0.7500 Epoch 81/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2348 - acc: 0.7500 Epoch 82/500 4/4 [==============================] - 0s 499us/sample - loss: 0.2329 - acc: 0.7500 Epoch 83/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2310 - acc: 0.7500 Epoch 84/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2292 - acc: 0.7500 Epoch 85/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2274 - acc: 0.7500 ###Markdown โ€ป ๋ฌธ์ œ148. 
XOR ๊ฒŒ์ดํŠธ๋ฅผ ๊ตฌํ˜„ํ•˜์‹œ์˜ค ###Code import tensorflow as tf from tensorflow.keras.models import Sequential # ์‹ ๊ฒฝ๋ง ๋ชจ๋ธ ๊ตฌ์„ฑ from tensorflow.keras.layers import Dense # ์™„์ „ ์—ฐ๊ฒฐ๊ณ„์ธต from tensorflow.keras.optimizers import SGD, RMSprop # ๊ฒฝ์‚ฌ ๊ฐ์†Œ๋ฒ• from tensorflow.keras.losses import mse # ์˜ค์ฐจํ•จ์ˆ˜์ˆ˜ x = np.array([[0,0],[1,0],[0,1],[1,1]]) y = np.array([[0],[1],[1],[0]]) model = Sequential() model.add(Dense(32, input_shape = (2,), activation = 'relu')) # ๋ชจ๋ธ์˜ ์ฒซ๋ฒˆ์งธ ์ธต์€ ๋ฐ์ดํ„ฐ์˜ ํ˜•ํƒœ(input_shape)๋ฅผ ์ „๋‹ฌํ•ด์ค˜์•ผ ํ•จ model.add(Dense(1, activation = 'sigmoid')) # ํ‰๊ท ์ œ๊ณฑ์˜ค์ฐจ ํšŒ๊ท€๋ฌธ์ œ model.compile(optimizer = RMSprop(), loss = mse, metrics = ['acc']) # ๋ชจ๋ธ์„ model.add๋กœ ๊ตฌ์„ฑํ–ˆ์œผ๋ฉด compile ํ•จ์ˆ˜๋ฅผ ํ˜ธ์ถœํ•ด์„œ ํ•™์Šต๊ณผ์ •์„ ์„ค์ • # metrics = ['acc'] : ํ•™์Šต๊ณผ์ •์„ ๋ชจ๋‹ˆํ„ฐ๋ง ํ•˜๊ธฐ ์œ„ํ•ด์„œ ์„ค์ • """ ์ดํ•ญ๋ถ„๋ฅ˜ ๋ฌธ์ œ(์ดํŒŒ๋ฆฌ, ๊ฐ•์•„์ง€/๊ณ ์–‘์ด๊ฐ™์€ 2๊ฐ€์ง€ ๋ถ„๋ฅ˜) model.compile(optimizer = RMSprop(), loss = binary_crossentropy, metrics = ['acc']) ๋‹คํ•ญ๋ถ„๋ฅ˜ ๋ฌธ์ œ model.compile(optimizer = RMSprop(), loss = categorical_crossentropy, metrics = ['acc']) ์˜ตํ‹ฐ๋งˆ์ด์ € ์ข…๋ฅ˜ : SGD, RMSprop, Adam, NAdam ๋“ฑ ์†์‹คํ•จ์ˆ˜(์˜ค์ฐจํ•จ์ˆ˜) ์ข…๋ฅ˜ : mse, binary_crossentropy, categorical_crossentropy """ model.fit(x, y, epochs = 500) # model.fit(data, label, epochs= , validation_data=(val_data, val_label) ) """ validation_data : ๊ฒ€์ •๋ฐ์ดํ„ฐ. ๋ชจ๋ธ์˜ ์„ฑ๋Šฅ์„ ๋ชจ๋‹ˆํ„ฐ๋ง ํ•˜๊ธฐ ์œ„ํ•ด์„œ ์‚ฌ์šฉ. 
ํ›ˆ๋ จ ๋ฐ์ดํ„ฐ์˜ ์ผ๋ถ€๋ฅผ ๊ฐ€์ง€๊ณ  ๋งŒ๋“  ๋ฐ์ดํ„ฐ ํ›ˆ๋ จ์ด ์ž˜ ๋˜๋Š”์ง€ ์„ฑ๋Šฅ์„ ๋ณด๊ธฐ์œ„ํ•œ ํ‰๊ฐ€ ์ง€ํ‘œ๋กœ ์‚ฌ์šฉ """ model.evaluate(x, y) """ evaluate ํ•จ์ˆ˜๋ฅผ ์‚ฌ์šฉํ•˜๋ฉด ์†์‹ค๊ณผ ํ‰๊ฐ€ ์ง€ํ‘œ์— ๋Œ€ํ•œ ์ •๋ณด๋ฅผ ํ™•์ธ ๊ฒฐ๊ณผ ์˜ˆ: [[-0.18919873] [ 0.27109194] [ 0.24799117] [ 0.7082819 ]] """ result = model.predict(x) for i in result: for j in i: print(round(j)) ###Output Epoch 1/500 4/4 [==============================] - 0s 65ms/sample - loss: 0.2540 - acc: 0.7500 Epoch 2/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2518 - acc: 0.5000 Epoch 3/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2503 - acc: 0.5000 Epoch 4/500 4/4 [==============================] - 0s 2ms/sample - loss: 0.2493 - acc: 0.5000 Epoch 5/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2484 - acc: 0.5000 Epoch 6/500 4/4 [==============================] - 0s 499us/sample - loss: 0.2476 - acc: 0.5000 Epoch 7/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2468 - acc: 0.5000 Epoch 8/500 4/4 [==============================] - 0s 499us/sample - loss: 0.2461 - acc: 0.5000 Epoch 9/500 4/4 [==============================] - 0s 499us/sample - loss: 0.2455 - acc: 0.5000 Epoch 10/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2450 - acc: 0.5000 Epoch 11/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2445 - acc: 0.5000 Epoch 12/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2440 - acc: 0.5000 Epoch 13/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2435 - acc: 0.5000 Epoch 14/500 4/4 [==============================] - 0s 750us/sample - loss: 0.2431 - acc: 0.5000 Epoch 15/500 4/4 [==============================] - 0s 4ms/sample - loss: 0.2426 - acc: 0.5000 Epoch 16/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2422 - acc: 0.5000 Epoch 17/500 4/4 [==============================] - 0s 
250us/sample - loss: 0.2418 - acc: 0.5000 Epoch 18/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2414 - acc: 0.5000 Epoch 19/500 4/4 [==============================] - 0s 499us/sample - loss: 0.2410 - acc: 0.5000 Epoch 20/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2406 - acc: 0.5000 Epoch 21/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2402 - acc: 0.5000 Epoch 22/500 4/4 [==============================] - 0s 499us/sample - loss: 0.2398 - acc: 0.5000 Epoch 23/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2394 - acc: 0.5000 Epoch 24/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2391 - acc: 0.5000 Epoch 25/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2387 - acc: 0.5000 Epoch 26/500 4/4 [==============================] - 0s 499us/sample - loss: 0.2383 - acc: 0.5000 Epoch 27/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2379 - acc: 0.5000 Epoch 28/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2376 - acc: 0.5000 Epoch 29/500 4/4 [==============================] - 0s 499us/sample - loss: 0.2372 - acc: 0.5000 Epoch 30/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2368 - acc: 0.5000 Epoch 31/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2365 - acc: 0.5000 Epoch 32/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2361 - acc: 0.5000 Epoch 33/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2358 - acc: 0.5000 Epoch 34/500 4/4 [==============================] - 0s 499us/sample - loss: 0.2354 - acc: 0.5000 Epoch 35/500 4/4 [==============================] - 0s 499us/sample - loss: 0.2350 - acc: 0.5000 Epoch 36/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2347 - acc: 0.5000 Epoch 37/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2343 - acc: 0.5000 Epoch 38/500 4/4 
[==============================] - 0s 250us/sample - loss: 0.2340 - acc: 0.5000 Epoch 39/500 4/4 [==============================] - 0s 499us/sample - loss: 0.2336 - acc: 0.5000 Epoch 40/500 4/4 [==============================] - 0s 499us/sample - loss: 0.2332 - acc: 0.5000 Epoch 41/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2329 - acc: 0.5000 Epoch 42/500 4/4 [==============================] - 0s 499us/sample - loss: 0.2325 - acc: 0.5000 Epoch 43/500 4/4 [==============================] - 0s 749us/sample - loss: 0.2321 - acc: 0.5000 Epoch 44/500 4/4 [==============================] - 0s 499us/sample - loss: 0.2318 - acc: 0.5000 Epoch 45/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2314 - acc: 0.5000 Epoch 46/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2310 - acc: 0.5000 Epoch 47/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2307 - acc: 0.5000 Epoch 48/500 4/4 [==============================] - 0s 249us/sample - loss: 0.2303 - acc: 0.5000 Epoch 49/500 4/4 [==============================] - 0s 499us/sample - loss: 0.2299 - acc: 0.5000 Epoch 50/500 4/4 [==============================] - 0s 499us/sample - loss: 0.2296 - acc: 0.5000 Epoch 51/500 4/4 [==============================] - 0s 498us/sample - loss: 0.2292 - acc: 0.5000 Epoch 52/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2288 - acc: 0.5000 Epoch 53/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2284 - acc: 0.5000 Epoch 54/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2281 - acc: 0.5000 Epoch 55/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2277 - acc: 0.5000 Epoch 56/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2273 - acc: 0.5000 Epoch 57/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2270 - acc: 0.5000 Epoch 58/500 4/4 [==============================] - 0s 500us/sample - loss: 
0.2266 - acc: 0.5000 Epoch 59/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2262 - acc: 0.7500 Epoch 60/500 4/4 [==============================] - 0s 499us/sample - loss: 0.2258 - acc: 0.7500 Epoch 61/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2254 - acc: 0.7500 Epoch 62/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2251 - acc: 1.0000 Epoch 63/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2247 - acc: 1.0000 Epoch 64/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2243 - acc: 1.0000 Epoch 65/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2239 - acc: 1.0000 Epoch 66/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2235 - acc: 1.0000 Epoch 67/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2231 - acc: 1.0000 Epoch 68/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2227 - acc: 1.0000 Epoch 69/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2224 - acc: 1.0000 Epoch 70/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2219 - acc: 1.0000 Epoch 71/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2216 - acc: 1.0000 Epoch 72/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2212 - acc: 1.0000 Epoch 73/500 4/4 [==============================] - 0s 499us/sample - loss: 0.2208 - acc: 1.0000 Epoch 74/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2204 - acc: 1.0000 Epoch 75/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2200 - acc: 1.0000 Epoch 76/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2197 - acc: 1.0000 Epoch 77/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2192 - acc: 1.0000 Epoch 78/500 4/4 [==============================] - 0s 498us/sample - loss: 0.2188 - acc: 1.0000 Epoch 79/500 4/4 
[==============================] - 0s 250us/sample - loss: 0.2184 - acc: 1.0000 Epoch 80/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2181 - acc: 1.0000 Epoch 81/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2176 - acc: 1.0000 Epoch 82/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2173 - acc: 1.0000 Epoch 83/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2168 - acc: 1.0000 Epoch 84/500 4/4 [==============================] - 0s 500us/sample - loss: 0.2165 - acc: 1.0000 Epoch 85/500 4/4 [==============================] - 0s 250us/sample - loss: 0.2160 - acc: 1.0000 ###Markdown โ–  ํ…์„œ ํ”Œ๋กœ์šฐ 2.x๋กœ mnist ๋ฐ์ดํ„ฐ ํ•™์Šต์‹œํ‚ค๋Š” ์‹ ๊ฒฝ๋ง ๊ตฌํ˜„ 1. mnist ๋ฐ์ดํ„ฐ์…‹ ๋‹ค์šด๋ฐ›๊ธฐ ###Code from tensorflow.keras.datasets.mnist import load_data (x_train, y_train), (x_test, y_test) = load_data(path='mnist.npz') print(x_train.shape, y_train.shape) print(y_train) print(x_test.shape, y_test.shape) print(y_test) ###Output (60000, 28, 28) (60000,) [5 0 4 ... 5 6 8] (10000, 28, 28) (10000,) [7 2 1 ... 4 5 6] ###Markdown 2. ๋ฐ์ดํ„ฐ ๊ทธ๋ ค๋ณด๊ธฐ ###Code import matplotlib.pyplot as plt import numpy as np plt.rcParams['figure.figsize']=(5,5) plt.rcParams.update({'font.size':13}) sample_size = 3 random_idx = np.random.randint(60000, size = sample_size) for idx in random_idx: img = x_train[idx, :] label = y_train[idx] plt.figure() plt.imshow(img) ###Output _____no_output_____ ###Markdown 3. ๊ฒ€์ฆ ๋ฐ์ดํ„ฐ ๋งŒ๋“ค๊ธฐ ###Code from sklearn.model_selection import train_test_split # ํ›ˆ๋ จ, ํ…Œ์ŠคํŠธ ๋ฐ์ดํ„ฐ 7:3 ๋น„์œจ๋กœ ๋ถ„๋ฆฌ x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size = 0.3, random_state = 777) print(x_train.shape) print(x_val.shape) ###Output (42000, 28, 28) (18000, 28, 28) ###Markdown 4. 
๋ชจ๋ธ์— ์ž…๋ ฅํ•˜๊ธฐ ์ „์— ๋ฐ์ดํ„ฐ ์ „์ฒ˜๋ฆฌ(์ •๊ทœํ™”) ๋ฉด์ ‘์งˆ๋ฌธ : ์ •๊ทœํ™” ๋˜๋Š” ์Šค์ผ€์ผ์„ ํ•ด์•ผํ•˜๋Š” ์ด์œ  ์‹ ๊ฒฝ๋ง์€ ์ž…๋ ฅ ๋ฐ์ดํ„ฐ์˜ ์Šค์ผ€์ผ์— ๋งค์šฐ ๋ฏผ๊ฐํ•˜๋ฏ€๋กœ ์ ์ ˆํ•œ ์ „์ฒ˜๋ฆฌ๊ฐ€ ํ•„์ˆ˜ ์ˆซ์ž์˜ ํ•˜๋‚˜์˜ ํ”ฝ์…€์ด 0~255 ์‚ฌ์ด์˜ ๋ฒ”์œ„์— ์žˆ๊ธฐ ๋•Œ๋ฌธ์— ํ•˜๋‚˜์˜ ํ”ฝ์…€์„ 255๋กœ ๋‚˜๋ˆ„๋ฉด 0~1์‚ฌ์ด์˜ ์ˆซ์ž๋กœ ์Šค์ผ€์ผ์ด ๋œ๋‹ค. ###Code num_x_train = x_train.shape[0] num_x_val = x_val.shape[0] num_x_test = x_test.shape[0] x_train = (x_train.reshape((num_x_train, 28*28)))/255 x_val = (x_val.reshape((num_x_val, 28*28)))/255 x_test = (x_test.reshape((num_x_test, 28*28)))/255 print(x_train.shape) # ๋ชจ๋ธ ์ž…๋ ฅ์„ ์œ„ํ•ด ๋ฐ์ดํ„ฐ๋ฅผ 2์ฐจ์›์œผ๋กœ ๋ณ€๊ฒฝ print(x_val.shape) print(x_test.shape) ###Output (42000, 784) (18000, 784) (10000, 784) ###Markdown 5. ๋ชจ๋ธ ์ž…๋ ฅ์„ ์œ„ํ•œ ๋ ˆ์ด๋ธ” ์ „์ฒ˜๋ฆฌ ###Code from tensorflow.keras.utils import to_categorical # mnist์˜ ๋ผ๋ฒจ ์ˆซ์ž๋ฅผ One_Hot_encoding ํ•จ์ˆ˜ y_train = to_categorical(y_train) y_val = to_categorical(y_val) """ loader2.py์— label_load์—์„œ one hot encoding ํ–ˆ์œผ๋ฏ€๋กœ ์ดํŒŒ๋ฆฌ ๋ฐ์ดํ„ฐ ๋ถ„๋ฅ˜ ์‹ ๊ฒฝ๋ง์—์„œ๋Š” ์œ„์˜ ์ฝ”๋“œ๋ฅผ ์‚ฌ์šฉํ•˜์ง€ ์•Š์Œ """ print(y_val) ###Output [[0. 0. 0. ... 1. 0. 0.] [0. 0. 0. ... 0. 1. 0.] [0. 0. 0. ... 0. 0. 0.] ... [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 1. 0. 0.] [0. 0. 0. ... 0. 0. 0.]] ###Markdown 6. ๋ชจ๋ธ ๊ตฌ์„ฑํ•˜๊ธฐ ###Code from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense model = Sequential() model.add(Dense(64, input_shape=(784,), activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(10, activation='softmax')) ###Output _____no_output_____ ###Markdown 7. ๋ชจ๋ธ๊ณผ์ • ์„ค์ •ํ•˜๊ธฐ ###Code from tensorflow.keras.losses import categorical_crossentropy model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc']) ###Output _____no_output_____ ###Markdown 8. 
๋ชจ๋ธ ํ•™์Šตํ•˜๊ธฐ ###Code history = model.fit(x_train, y_train, epochs=30, batch_size=128, validation_data=(x_val, y_val)) ###Output Train on 42000 samples, validate on 18000 samples Epoch 1/30 42000/42000 [==============================] - 3s 74us/sample - loss: 0.4763 - acc: 0.8654 - val_loss: 0.2444 - val_acc: 0.9291 Epoch 2/30 42000/42000 [==============================] - 1s 21us/sample - loss: 0.2074 - acc: 0.9398 - val_loss: 0.1923 - val_acc: 0.9446 Epoch 3/30 42000/42000 [==============================] - 2s 37us/sample - loss: 0.1560 - acc: 0.9540 - val_loss: 0.1617 - val_acc: 0.9529 Epoch 4/30 42000/42000 [==============================] - 1s 22us/sample - loss: 0.1273 - acc: 0.9628 - val_loss: 0.1402 - val_acc: 0.9579 Epoch 5/30 42000/42000 [==============================] - 1s 21us/sample - loss: 0.1075 - acc: 0.9688 - val_loss: 0.1287 - val_acc: 0.9614 Epoch 6/30 42000/42000 [==============================] - 1s 23us/sample - loss: 0.0911 - acc: 0.9739 - val_loss: 0.1249 - val_acc: 0.9624 Epoch 7/30 42000/42000 [==============================] - 1s 22us/sample - loss: 0.0796 - acc: 0.9768 - val_loss: 0.1154 - val_acc: 0.9658 Epoch 8/30 42000/42000 [==============================] - 1s 21us/sample - loss: 0.0693 - acc: 0.9796 - val_loss: 0.1160 - val_acc: 0.9664 Epoch 9/30 42000/42000 [==============================] - 1s 24us/sample - loss: 0.0609 - acc: 0.9821 - val_loss: 0.1115 - val_acc: 0.9669 Epoch 10/30 42000/42000 [==============================] - 1s 21us/sample - loss: 0.0521 - acc: 0.9846 - val_loss: 0.1067 - val_acc: 0.9691 Epoch 11/30 42000/42000 [==============================] - 1s 20us/sample - loss: 0.0463 - acc: 0.9856 - val_loss: 0.1035 - val_acc: 0.9700 Epoch 12/30 42000/42000 [==============================] - 1s 22us/sample - loss: 0.0413 - acc: 0.9875 - val_loss: 0.1111 - val_acc: 0.9671 Epoch 13/30 42000/42000 [==============================] - 1s 22us/sample - loss: 0.0369 - acc: 0.9887 - val_loss: 0.1084 - val_acc: 0.9693 
Epoch 14/30 42000/42000 [==============================] - 1s 21us/sample - loss: 0.0312 - acc: 0.9912 - val_loss: 0.1087 - val_acc: 0.9693 Epoch 15/30 42000/42000 [==============================] - 1s 24us/sample - loss: 0.0295 - acc: 0.9908 - val_loss: 0.1108 - val_acc: 0.9699 Epoch 16/30 42000/42000 [==============================] - 1s 22us/sample - loss: 0.0263 - acc: 0.9925 - val_loss: 0.1110 - val_acc: 0.9693 Epoch 17/30 42000/42000 [==============================] - 1s 24us/sample - loss: 0.0221 - acc: 0.9939 - val_loss: 0.1082 - val_acc: 0.9698 Epoch 18/30 42000/42000 [==============================] - 1s 21us/sample - loss: 0.0211 - acc: 0.9936 - val_loss: 0.1149 - val_acc: 0.9698 Epoch 19/30 42000/42000 [==============================] - 1s 21us/sample - loss: 0.0202 - acc: 0.9937 - val_loss: 0.1185 - val_acc: 0.9687 Epoch 20/30 42000/42000 [==============================] - 1s 23us/sample - loss: 0.0159 - acc: 0.9957 - val_loss: 0.1181 - val_acc: 0.9689 Epoch 21/30 42000/42000 [==============================] - 1s 22us/sample - loss: 0.0137 - acc: 0.9961 - val_loss: 0.1160 - val_acc: 0.9705 Epoch 22/30 42000/42000 [==============================] - 1s 21us/sample - loss: 0.0120 - acc: 0.9971 - val_loss: 0.1296 - val_acc: 0.9687 Epoch 23/30 42000/42000 [==============================] - 1s 22us/sample - loss: 0.0140 - acc: 0.9960 - val_loss: 0.1355 - val_acc: 0.9672 Epoch 24/30 42000/42000 [==============================] - 1s 23us/sample - loss: 0.0089 - acc: 0.9980 - val_loss: 0.1308 - val_acc: 0.9696 Epoch 25/30 42000/42000 [==============================] - 1s 23us/sample - loss: 0.0083 - acc: 0.9982 - val_loss: 0.1366 - val_acc: 0.9692 Epoch 26/30 42000/42000 [==============================] - 1s 25us/sample - loss: 0.0077 - acc: 0.9985 - val_loss: 0.1384 - val_acc: 0.9700 Epoch 27/30 42000/42000 [==============================] - 1s 22us/sample - loss: 0.0150 - acc: 0.9950 - val_loss: 0.1409 - val_acc: 0.9689 Epoch 28/30 42000/42000 
[==============================] - 1s 22us/sample - loss: 0.0095 - acc: 0.9972 - val_loss: 0.1508 - val_acc: 0.9676 Epoch 29/30 42000/42000 [==============================] - 1s 23us/sample - loss: 0.0062 - acc: 0.9987 - val_loss: 0.1327 - val_acc: 0.9721 Epoch 30/30 42000/42000 [==============================] - 1s 24us/sample - loss: 0.0043 - acc: 0.9991 - val_loss: 0.1534 - val_acc: 0.9679 ###Markdown 9. history๋ฅผ ํ†ตํ•ด ํ™•์ธํ•ด ๋ณผ ์ˆ˜ ์žˆ๋Š” ๊ฐ’ ์ถœ๋ ฅํ•˜๊ธฐ ###Code history.history.keys() ###Output _____no_output_____ ###Markdown 10. ์‹œ๊ฐํ™” ์ฝ”๋“œ ###Code plt.rcParams['figure.figsize']=(20,10) plt.rcParams.update({'font.size':15}) plt.plot(history.history['acc']) plt.plot(history.history['val_acc']) plt.title('Model accuracy') plt.ylabel('Accuracy') plt.xlabel('Epoch') plt.legend(['Train', 'Test'], loc='upper left') plt.show() plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('Model loss') plt.ylabel('Loss') plt.xlabel('Epoch') plt.legend(['Train', 'Test'], loc='upper left') plt.show() ###Output _____no_output_____
09_fv_nonlinear.ipynb
###Markdown Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license. (c) Kyle T. Mandli ###Code from __future__ import print_function %matplotlib inline import numpy import matplotlib.pyplot as plt import matplotlib.animation from IPython.display import HTML from clawpack import pyclaw from clawpack import riemann ###Output _____no_output_____ ###Markdown Finite Volume Methods for Nonlinear Systems Godunov's MethodRecall that our goal is to evolve the cell-averages$$ Q^n_i \approx \frac{1}{\Delta x} \int^{x_{i+1/2}}_{x_{i-1/2}} q(x, t_n) dx$$using a piecewise reconstruction $\widetilde{q}^n(x, t_n)$ using these cell-averages and evolving these functions using the conservation law.Solving (evolving) over time $\Delta t$ with this data gives us the function $\widetilde{q}^n(x, t_{n+1})$ leading to$$ Q^{n+1}_i = \frac{1}{\Delta x} \int^{x_{i+1/2}}_{x_{i-1/2}} \widetilde{q}^n(x, t_{n+1}) dx$$. The final component of Godunov's method suggests that we do not need the entire Riemann solution but only need the solution along the cell-interface $x_{i-1/2}$ such that$$ Q^{n+1}_i = Q^n_i - \frac{\Delta t}{\Delta x} (F^n_{i+1/2} - F^n_{i-1/2})$$where$$ F^n_{i-1/2} = \mathcal{F}(Q^n_{i-1}, Q^n_i) = f(\widehat{q}(Q^n_{i-1}, Q^n_i)$$where $\widehat{q}(Q^n_{i-1}, Q^n_i)$ is the Riemann solution evaluated along $x/t = 0$. Godunov's method can also be implemented in wave-propagation form$$ Q^{n+1}_i = Q^n_i - \frac{\Delta t}{\Delta x} (\mathcal{A}^+ \Delta Q_{i-1/2} + \mathcal{A}^- \Delta Q_{i+1/2}),$$which takes the fluxes to be$$\begin{aligned} \mathcal{A}^- \Delta Q_{i-1/2} &= f(\widehat{Q}_{i-1/2}) - f(Q_{i-1}) \\ \mathcal{A}^+ \Delta Q_{i-1/2} &= f(Q_{i}) - f(\widehat{Q}_{i-1/2}).\end{aligned}$$ The primary note of importance now is that all we need for Godunov's method is what the solution along the grid cell edge is rather than the full Riemann solution we have been working with. 
This strongly suggests that there may be ways to use approximate Riemann solvers that give us only what we need and are less expensive. Convergence of Godunov's MethodIt also useful at this point to review what we know about the convergence of Godunov's method that we showed before. 1. The Lax-Wendroff theorem implies that for nonlinear systems of conservation laws, if we have a sequence of numerical approximations representing grid refinement that this sequence will converge in the appropriate sense to a function $q(x,t)$ and that this functions is a weak solution of the conservation law. This is unfortunately not true in general for the nonconservative form of the equations. 2. Entropy conditions will allow us to pick out the correct weak solution and if we employ Riemann solvers that obey the appropriate Entropy conditions that the overall method will also pick out the entropy satisfying solution. 3. The Lax-Wendroff theorem unfortunately does not guarantee convergence, rather it only says *if* a sequence converges that it converges to a weak solution of the conservation law. Showing convergence requires a form of stability for which we used TV-stability before. Unfortunately TV-stability cannot be extended as is to the system case. Approximate Riemann SolversWe now will start to discuss the idea that perhaps we only need a small part of the full Riemann solution if we are interested in using Godunov's methods. In particular, if $\widehat{q}(q_\ell, q_r)$ is the general, full solution to a Riemann problem that we only need to know the state along $x/t = 0$. This usually implies that we need to compute one of the middle states $q_m$ of the Riemann solution although this is highly dependent on wave speeds and criticality conditions. Define a function$$ \widehat{Q}_{i-1/2}(x/t)$$that approximates the true similarity solution of the Riemann problem with input data $Q_{i-1}$ and $Q_i$. 
This approximation will generally depend on some set of jumps in $Q$ where$$ Q_i - Q_{i-1} = \sum^{M_w}_{p=1} \mathcal{W}^p_{i-1/2}$$where now we are allowed to pick out how many waves $M_w$ represent the approximation. Generalizing Godunov's method to systems then we could take two different approaches to defining the fluctuations:1. Define the numerical flux by$$ F_{i-1/2} = f(\widehat{Q}_{i-1/2})$$where$$ \widehat{Q}_{i-1/2} = Q_{i-1} + \sum_{p:s^p_{i-1/2} < 0} \mathcal{W}^p_{i-1/2}.$$In other words the state that lies along $x/t = 0$. We can also go the other direction so that$$ \widehat{Q}_{i-1/2} = Q_{i} - \sum_{p:s^p_{i-1/2} > 0} \mathcal{W}^p_{i-1/2}.$$Therefore the fluctuations are $$\begin{aligned} \mathcal{A}^- \Delta Q_{i-1/2} &= f(\widehat{Q}_{i-1/2}) - f(Q_{i-1}) \\ \mathcal{A}^+ \Delta Q_{i-1/2} &= f(Q_{i}) - f(\widehat{Q}_{i-1/2}) \\\end{aligned}$$1. Use the waves and speeds to define$$\begin{aligned} \mathcal{A}^- \Delta Q_{i-1/2} &= \sum^{M_w}_{p=1} (s^p_{i-1/2})^- \mathcal{W}^p_{i-1/2} \\ \mathcal{A}^+ \Delta Q_{i-1/2} &= \sum^{M_w}_{p=1} (s^p_{i-1/2})^+ \mathcal{W}^p_{i-1/2} \\\end{aligned}$$ The important realization here is that both of these approaches are the same for the all-shock Riemann solution. This also implies that unless we have a transonic rarefaction, i.e. a rarefaction fan contains $x/t = 0$ we do not need to worry about what the exact type of wave is but rather what middle state contains $x/t = 0$. Linearized Riemann SolversProbably the most natural way to find an approximate Riemann solver is to find some linearization of the problem that is appropriate for the conservation law such that for $q_t + f(q)_x = 0$ we can instead locally solve$$ \widehat{q}_t + \widehat{A}_{i-1/2} \widehat{q}_x = 0$$such that the matrix $\widehat{A}_{i-1/2}$ as an appropriate approximation to $f'(q)$ valid in the neighborhood of $x_{i-1/2}$. 
We also need to require that $\widehat{A}_{i-1/2}$ is diagonalizable with real eigenvalues so that we can have some sense that$$ \widehat{A}_{i-1/2} \rightarrow f'(\overline{q}) \quad \text{as } Q_{i-1}, Q_i \rightarrow \overline{q}$$for consistency. One of the reasons that we expect this to work is that for most Riemann solutions the shocks are well isolated. This implies that the difference from one cell to another$$ ||Q_i - Q_{i-1}|| = \mathcal{O}(\Delta x)$$as long as the jump is not large and therefore$$ f'(Q_{i-1}) \approx f'(Q_i).$$ We also know that if $||Q_i - Q_{i-1}|| = \mathcal{O}(\Delta x)$ that we expect that the Hugoniot loci and integral curves are similar to the eigenvectors of the system. The solution would then be similar to the linear hyperbolic systems we have studied before with the waves determined by the eigenvectors $\widehat{r}^p_{i-1/2}$ and speeds $s^p_{i-1/2} = \widehat{\lambda}^p_{i-1/2}$. This also allows us to easily identify the waves as$$ Q_i - Q_{i-1} = \sum^m_{p=1} \alpha^p_{i-1/2} \widehat{r}^p_{i-1/2}$$and therefore$$ \mathcal{W}^p_{i-1/2} = \alpha^p_{i-1/2} \widehat{r}^p_{i-1/2}.$$ There are of course multiple ways to form this linearized approximation. In general we could use$$ \widehat{A}_{i-1/2} = f'(\overline{Q}_{i-1/2})$$where $\overline{Q}_{i-1/2}$ is some appropriate "average state" dependent on $Q_i$ and $Q_{i-1}$. - What "average state" would you propose? - What properties of the solution might not work in general? The most obvious average is the true average of the values$$ \overline{Q}_{i-1/2} = \frac{1}{2} (Q_i + Q_{i-1}).$$Although this is consistent this does not imply that the method is consistent unless some form of $$ \widehat{Q}_{i-1/2} = Q_{i-1} + \sum_{p:s^p_{i-1/2} < 0} \mathcal{W}^p_{i-1/2}$$and $$\begin{aligned} \mathcal{A}^- \Delta Q_{i-1/2} &= f(\widehat{Q}_{i-1/2}) - f(Q_{i-1}) \\ \mathcal{A}^+ \Delta Q_{i-1/2} &= f(Q_{i}) - f(\widehat{Q}_{i-1/2}) \\\end{aligned}$$are satisfied. 
Unfortunately$$\begin{aligned} \mathcal{A}^- \Delta Q_{i-1/2} &= \sum^{M_w}_{p=1} (s^p_{i-1/2})^- \mathcal{W}^p_{i-1/2} \\ \mathcal{A}^+ \Delta Q_{i-1/2} &= \sum^{M_w}_{p=1} (s^p_{i-1/2})^+ \mathcal{W}^p_{i-1/2} \\\end{aligned}$$does not guarantee conservation unless we add an additional condition. The primary condition we need to ensure for conservation is actually$$ f(Q_i) - f(Q_{i-1}) = \sum^{M_w}_{p=1} s^p_{i-1/2} \mathcal{W}^p_{i-1/2},$$which in general is not satisfied for many different forms of $\overline{Q}_{i-1/2}$. Averaging $Q$ values is not the only approach, why not average the flux values with$$ \widehat{A}_{i-1/2} = \frac{1}{2}[f'(Q_{i-1}) + f'(Q_i)]$$or perhaps some other average of Jacobian evaluations. Unfortunately this also does not satisfy the jump in fluxes previously mentioned unless care is taken. Roe LinearizationOne of the keys to providing a robust linearization is to put some conditions on the linearization and its eigenspace. The first of these is:> If $Q_{i-1}$ and $Q_i$ are connected by a single wave $\mathcal{W}^p = Q_i - Q_{i-1}$ in the true Riemann solution, then $\mathcal{W}^p$ should also be an eigenvector of $\widehat{A}_{i-1/2}$.If this is true then the approximation will consist of a single wave that agrees with the exact Riemann solution with the strongest solution. Another way to say this is that if $Q_i$ and $Q_{i-1}$ are connected by a single wave, then$$ f(Q_i) - f(Q_{i-1}) = s (Q_i - Q_{i-1}).$$If the linearized problem also has this form then$$ \widehat{A}_{i-1/2} (Q_i - Q_{i-1}) = s (Q_i - Q_{i-1})$$implying$$ \widehat{A}_{i-1/2} (Q_i - Q_{i-1}) = f(Q_i) - f(Q_{i-1}).$$If this last expression is true then an approximate solver of this form is in fact conservative. This can also be shown via$$ \mathcal{A}^- \Delta Q_{i-1/2} + \mathcal{A}^+ \Delta Q_{i-1/2} = f(Q_i) - f(Q_{i-1}),$$which is implied by the above condition. 
Consequently the condition$$ \widehat{A}_{i-1/2} (Q_i - Q_{i-1}) = f(Q_i) - f(Q_{i-1})$$is often called **Roe's Condition**. The practical side of this is that we need to find an average that satisfies this condition. One way to do this is to think of the problem as finding a path through state space connecting $Q_i$ and $Q_{i-1}$ parameterized by$$ q(\xi) = Q_{i-1} + (Q_i - Q_{i-1}) \xi$$for $\xi \in [0, 1]$ and require it satisfy Roe's condition. Writing this out we then have$$\begin{aligned} f(Q_i) - f(Q_{i-1}) &= \int^1_0 \frac{\text{d}}{\text{d} \xi} f(q(\xi)) d\xi \\ &= \int^1_0 f'(q(\xi)) q'(\xi) d\xi \\ &= \left[ \int^1_0 f'(q(\xi)) d\xi \right ] (Q_i - Q_{i-1}).\end{aligned}$$ Recalling that we need $$ \widehat{A}_{i-1/2} (Q_i - Q_{i-1}) = f(Q_i) - f(Q_{i-1})$$this implies that $$ f(Q_i) - f(Q_{i-1}) = \left[ \int^1_0 f'(q(\xi)) d\xi \right ] (Q_i - Q_{i-1})$$gives us$$ \widehat{A}_{i-1/2} = \int^1_0 f'(q(\xi)) d\xi.$$ This unfortunately does not guarantee that the resulting matrix $\widehat{A}_{i-1/2}$ is diagonalizable with real eigenvalues. The integral itself can also be difficult to evaluate leaving us wanting a better approach. Instead Roe proposed a **parameter vector** $z(q)$, effectively a change of variables, that leads not only to easier evaluation of the integrals but also to evaluations that satisfies properties that we want. Here we now will integrate along the path$$ z(\xi) = Z_{i-1} + (Z_i - Z_{i-1}) \xi$$where $Z_j = z(Q_j)$ for $j=i-1, i$ and therefore $z'(\xi) = Z_i - Z_{i-1}$ that is independent of $\xi$. This implies$$\begin{aligned} f(Q_i) - f(Q_{i-1}) &= \int^1_0 \frac{\text{d}}{\text{d} \xi} f(z(\xi)) d\xi \\ &= \int^1_0 f'(z(\xi)) z'(\xi) d\xi \\ &= \left[ \int^1_0 f'(z(\xi)) d\xi \right ] (Z_i - Z_{i-1}).\end{aligned}$$ This expression we hope is easier to evaluate but we have no idea what this expression $z(\xi)$ really is yet. 
We can find this by rewriting $z(q)$ as$$\begin{aligned} f(Q_i) - f(Q_{i-1}) &= \widehat{C}_{i-1/2} (Z_i - Z_{i-1}) \\ Q_i - Q_{i-1} &= \widehat{B}_{i-1/2} (Z_i - Z_{i-1})\end{aligned}$$and therefore observing$$ \widehat{A}_{i-1/2} = \widehat{C}_{i-1/2} \widehat{B}^{-1}_{i-1/2}.$$ Harten and Lax showed that this approach will always be able to produce $\widehat{A}_{i-1/2}$ if the system has a convex entropy. One can actually also then choose $z(q) = \eta'(q)$. This being true we still want to ensure that the integrals of interest are easily evaluated, which is best shown by an example.
Example: Roe Solver for Shallow Water$$ q = \begin{bmatrix} h \\ hu \end{bmatrix} = \begin{bmatrix} q^1 \\ q^2 \end{bmatrix} \quad f(q) = \begin{bmatrix} hu \\ hu^2 + \frac{1}{2} gh^2 \end{bmatrix} = \begin{bmatrix} q^2 \\ \frac{(q^2)^2}{q^1} + \frac{1}{2} g (q^1)^2 \end{bmatrix}$$and$$ f'(q) = \begin{bmatrix} 0 & 1 \\ -\left(\frac{q^2}{q^1} \right)^2 + g q^1 & 2 \frac{q^2}{q^1} \end{bmatrix} = \begin{bmatrix} 0 & 1 \\ -u^2 + g h & 2 u \end{bmatrix}$$Choose the parameterization as$$ z = h^{-1/2} q \quad \Rightarrow \quad \begin{bmatrix} z^1 \\ z^2 \end{bmatrix} = \begin{bmatrix} \sqrt{h} \\ u \sqrt{h} \end{bmatrix}$$See if you can carry this parameterization through and find $\widehat{A}_{i-1/2}$. Taking$$q(z) = \begin{bmatrix} (z^1)^2 \\ z^1 z^2 \end{bmatrix} \quad \Rightarrow \quad \frac{\partial q}{\partial z} = \begin{bmatrix} 2z^1 & 0 \\ z^2 & z^1 \end{bmatrix}$$and therefore$$ f(z) = \begin{bmatrix} z^1 z^2 \\ (z^2)^2 + \frac{1}{2} g (z^1)^4 \end{bmatrix} \quad \Rightarrow \quad \frac{\partial}{\partial z} f(z) = \begin{bmatrix} z^2 & z^1 \\ 2 g (z^1)^3 & 2 z^2 \end{bmatrix}$$ We now need to integrate from $\xi = 0 \ldots 1$ where$$ z^p = Z^p_{i-1} + (Z^p_i - Z^p_{i-1}) \xi.$$At this point all of our traverses through state space are linear except for one but we are still considering polynomials.
Integrating the linear terms in our integrals leads us to$$ \int^1_0 z^p(\xi) d\xi = \frac{1}{2} (Z^p_{i-1} + Z^p_{i}) \equiv \overline{Z}^p,$$clearly just the average of the states of the transformed quantities $z(q)$. Integrating the higher order terms we have$$\begin{aligned} \int^1_0 (z^1(\xi))^3 d\xi &= \frac{1}{4} \left( \frac{(Z^1_i)^4 - (Z^1_{i-1})^4}{Z^1_i - Z^1_{i-1}} \right) \\ &= \frac{1}{2}(Z^1_{i-1} + Z^1_i) \cdot \frac{1}{2} \left [ (Z^1_{i-1})^2 + (Z^1_i)^2 \right ] \\ &= \overline{Z}^1 \overline{h},\end{aligned}$$where$$ \overline{h} = \frac{1}{2} (h_{i-1} + h_i).$$ From this we obtain$$ \widehat{B}_{i-1/2} = \begin{bmatrix} 2 \overline{Z}^1 & 0 \\ \overline{Z}^2 & \overline{Z}^1 \end{bmatrix}$$and$$ \widehat{C}_{i-1/2} = \begin{bmatrix} \overline{Z}^2 & \overline{Z}^1 \\ 2 g \overline{Z}^1 \overline{h} & 2 \overline{Z}^2 \end{bmatrix}.$$ $$ \widehat{B}_{i-1/2} = \begin{bmatrix} 2 \overline{Z}^1 & 0 \\ \overline{Z}^2 & \overline{Z}^1 \end{bmatrix} \quad \quad \widehat{C}_{i-1/2} = \begin{bmatrix} \overline{Z}^2 & \overline{Z}^1 \\ 2 g \overline{Z}^1 \overline{h} & 2 \overline{Z}^2 \end{bmatrix}$$Therefore$$ \widehat{A}_{i-1/2} = \widehat{C}_{i-1/2} \widehat{B}^{-1}_{i-1/2} = \begin{bmatrix} 0 & 1 \\ -\left(\frac{\overline{Z}^2}{\overline{Z}^1} \right)^2 + g \overline{h} & 2 \frac{\overline{Z}^2}{\overline{Z}^1} \end{bmatrix} = \begin{bmatrix} 0 & 1\\ -\widehat{u}^2 + g \overline{h} & 2 \widehat{u} \end{bmatrix}$$where$$ \overline{h} = \frac{1}{2} (h_{i-1} + h_i)$$and$$ \widehat{u} = \frac{\overline{Z}^2}{\overline{Z}^1} = \frac{u_{i-1} \sqrt{h_{i-1}} + u_i \sqrt{h_i}}{\sqrt{h_{i-1}} + \sqrt{h_i}}$$
Sonic Entropy Fixes
One of the biggest drawbacks to a Roe linearized Riemann solver is that the solution formally only consists of shocks. Even in the scalar case the Roe condition can be satisfied by$$ \widehat{\mathcal{A}}_{i-1/2} = \frac{f(Q_i) - f(Q_{i-1})}{Q_i - Q_{i-1}}$$where here $\widehat{\mathcal{A}}_{i-1/2}$ is a scalar.
This is of course the shock speed. As mentioned before numerically this is only a problem for transonic rarefactions where$$ f'(q_\ell) < 0 < f'(q_r)$$for the scalar case (these are of course the edges of the rarefaction wave). The same holds true for systems of equations when a particular wave is a transonic rarefaction. For the shallow water equations we can easily check if one of the two waves is a transonic rarefaction with the following computation:$$\begin{aligned} \lambda^1_{i-1} = u_{i-1} - \sqrt{g h_{i-1}} & & \lambda^1_m = u_m - \sqrt{g h_m} \\ \lambda^2_{m} = u_{m} + \sqrt{g h_{m}} & & \lambda^2_i = u_i + \sqrt{g h_i}.\end{aligned}$$Similar to the previous condition if any of these values in a row are separated by zero then we know we have a transonic rarefaction. The biggest impediment to using these conditions is that we need to know $q_m$. For simple systems this may not be too hard a burden as we know that if there is a transonic rarefaction there can be only one. Assuming there is one we can use the simplification that we only need the solution at $\xi = x/t = 0$. For instance in the 1-rarefaction case we know$$\begin{aligned} \widehat{h}_{i-1/2} &= \frac{\left(u_{i-1} + 2 \sqrt{g h_{i-1}} \right)^2}{9g} \\ \widehat{u}_{i-1/2} &= u_{i-1} + 2 \left( \sqrt{g h_{i-1}} - \sqrt{g \widehat{h}_{i-1/2}} \right)\end{aligned}$$
Harten-Hyman Entropy Fix
An easier and more general approach to entropy fixes is due to Harten and Hyman and is generally the approach used in many Clawpack solvers. The principal approach is this, suppose that a transonic rarefaction exists in the $k$-family and therefore $$ \lambda^k_\ell < 0 < \lambda^k_r$$ and with$$\begin{aligned} q^k_\ell &= Q_{i-1} + \sum^{k-1}_{p=1} \mathcal{W}^p \\ q^k_r &= q_\ell^k + \mathcal{W}^k,\end{aligned}$$in other words the state to the left and right of the rarefaction.
Now replace the single wave $\mathcal{W}^k$ propagating with speed $\widehat{\lambda}^k$ by two waves$$ \mathcal{W}^k_\ell = \beta \mathcal{W}^k \quad \mathcal{W}^k_r = (1 - \beta) \mathcal{W}^k$$propagating at speeds $\lambda^k_\ell$ and $\lambda^k_r$ respectively. Maintaining conservation requires$$ \lambda^k_\ell \mathcal{W}^k_\ell + \lambda^k_r \mathcal{W}^k_r = \widehat{\lambda}^k \mathcal{W}^k$$and therefore$$ \beta = \frac{\lambda^k_r - \widehat{\lambda}^k}{\lambda^k_r - \lambda^k_\ell}.$$This amounts to splitting the wave into two pieces traveling to the left and right and therefore modifying the fluctuations $\mathcal{A}^\pm \Delta Q$.
Numerical Viscosity
One way to view the entropy problem as mentioned before is that not enough viscosity is being introduced into the solution. Numerical viscosity can solve this for us and we can modify Roe's linearization to account for this. The numerical flux for Roe's method is$$\begin{aligned} F_{i-1/2} &= \frac{1}{2} [f(Q_{i-1}) + f(Q_i) ] - \frac{1}{2} \left | \widehat{A}_{i-1/2} \right | (Q_i - Q_{i-1}) \\ &=\frac{1}{2} [f(Q_{i-1}) + f(Q_i) ] - \frac{1}{2} \sum_p \left | \widehat{\lambda}^p_{i-1/2} \right | \mathcal{W}^p_{i-1/2}.\end{aligned}$$The sum in the last expression can be looked upon as being a form of viscosity. If a transonic rarefaction is present then we expect that one of the eigenvalues $\widehat{\lambda}^p_{i-1/2}$ is very close to zero and the corresponding term in the last sum will see very little viscosity. This is in fact often what we observe, a stationary shock where there should be none since the corresponding speed is identically zero.
###Code def true_solution(x, t): if t > 0: t_vec = t * numpy.ones(x.shape) return (x < 0) * -numpy.ones(x.shape) + \ (-t_vec < x) * (x <= 0) * (x / t_vec + 1) + \ (0 <= x) * (x <= 2*t_vec) * x / t_vec + \ (2 * t_vec <= x) * 2.0 * numpy.ones(x.shape) else: return (x < 0) * -numpy.ones(x.shape) + \ (0.0 <= x) * 2.0 * numpy.ones(x.shape) def burgers_animation(order=2, efix=True): solver = pyclaw.ClawSolver1D(riemann.burgers_1D_py.burgers_1D) solver.kernel_language = "Python" solver.limiters = pyclaw.limiters.tvd.MC solver.bc_lower[0] = pyclaw.BC.extrap solver.bc_upper[0] = pyclaw.BC.extrap solver.order = order x = pyclaw.Dimension(-3.0, 3.0, 50, name='x') domain = pyclaw.Domain(x) num_eqn = 1 state = pyclaw.State(domain, num_eqn) xc = domain.grid.x.centers state.q[0,:] = (xc < 0) * -numpy.ones(xc.shape) + 2.0 * (xc >= 0) * numpy.ones(xc.shape) state.problem_data['efix'] = efix claw = pyclaw.Controller() claw.tfinal = 1.0 claw.num_output_times = 10 claw.solution = pyclaw.Solution(state,domain) claw.solver = solver claw.keep_copy = True claw.run() x = claw.frames[0].grid.dimensions[0].centers fig = plt.figure() axes = plt.subplot(1, 1, 1) axes.set_xlim((x[0], x[-1])) axes.set_ylim((-1.5, 2.5)) axes.set_title("Burgers Equation") def init(): axes.set_xlim((x[0], x[-1])) axes.set_ylim((-1.5, 2.5)) computed_line, = axes.plot(x[0], claw.frames[0].q[0, :][0], 'bo-') true_line, = axes.plot(x[0], claw.frames[0].q[0, :][0], 'k-') return (computed_line, true_line) computed_line, true_line = init() def fplot(n): computed_line.set_data([x,], [claw.frames[n].q[0, :]]) true_line.set_data([x,], [true_solution(x, claw.frames[n].t)]) return (computed_line, true_line) frames_to_plot = range(0, len(claw.frames)) plt.close(fig) return matplotlib.animation.FuncAnimation(fig, fplot, frames=frames_to_plot, interval=100, blit=True, init_func=init, repeat=False) HTML(burgers_animation(order=1, efix=False).to_jshtml()) ###Output _____no_output_____ ###Markdown If we implement the above idea 
instead using the wave-propagation formulation and $\mathcal{A}^\pm \Delta Q$ we get an additional detail with the numerical flux written as$$ F_{i-1/2} = \frac{1}{2} [f(Q_{i-1}) + f(Q_i) ] - \frac{1}{2} \sum_p \left [ (\widehat{\lambda}^p_{i-1/2})^+ - (\widehat{\lambda}^p_{i-1/2})^- \right ] \mathcal{W}^p_{i-1/2}$$that allows us to apply the Harten-Hyman entropy fix.
Harten's Entropy Fix
Another entropy fix proposed by Harten is based on increasing the viscosity only by modifying the field that contains the eigenvalue that may be too close to zero. It follows that we replace $|\widehat{\lambda}^p_{i-1/2}|$ by a limited value$$ \phi_\delta(\widehat{\lambda}^p_{i-1/2})$$where$$ \phi_\delta(\lambda) = \left \{ \begin{aligned} &|\lambda| & & \text{if } |\lambda| \geq \delta \\ &\frac{\lambda^2 + \delta^2}{2 \delta} & & \text{if } |\lambda| < \delta \end{aligned} \right .$$which effectively changes the absolute value in the original Roe flux to be perturbed from zero. Unfortunately this approach requires tuning the parameter $\delta$ for each problem.
Failure of Linearized Solvers
Linearized solvers can be a powerful way to reduce the computational cost of finite volume solvers but when might they go wrong? One of the most common failures happens near "vacuum states", states where one of the conserved quantities goes to zero. For the Euler equations this occurs when $\rho \rightarrow 0$ and in the shallow water equations when $h \rightarrow 0$. For both of these cases we require $\rho, h \geq 0$. So what goes wrong? We have assumed that the eigenvectors will intersect somewhere similar to where the true Hugoniot loci or integral curves intersect.
HLL and HLLE Solvers
Another approach to an approximate Riemann solver uses only two waves regardless of the true number of waves. This involves estimating the waves that form the edges of the Riemann fan and using these waves with one intermediate state.
Define the two waves now as$$ \mathcal{W}^1_{i-1/2} = \widehat{Q}_{i-1/2} - Q_{i-1} \quad \mathcal{W}^2_{i-1/2} = Q_{i} - \widehat{Q}_{i-1/2}$$where $\widehat{Q}_{i-1/2}$ is the middle state. Requiring conservation we want these waves to satisfy$$\begin{aligned} f(Q_i) - f(Q_{i-1}) &= \sum^2_{p=1} s^p_{i-1/2} \mathcal{W}^p_{i-1/2} \\ &= s^1_{i-1/2} \mathcal{W}^1_{i-1/2} + s^2_{i-1/2} \mathcal{W}^2_{i-1/2} \\ &= s^1_{i-1/2} (\widehat{Q}_{i-1/2} - Q_{i-1}) + s^2_{i-1/2} (Q_{i} - \widehat{Q}_{i-1/2})\end{aligned}$$implying$$ \widehat{Q}_{i-1/2} = \frac{f(Q_i) - f(Q_{i-1}) - s^2_{i-1/2} Q_i + s^1_{i-1/2} Q_{i-1}}{s^1_{i-1/2} - s^2_{i-1/2}}.$$This approach was originally suggested by Harten, Lax and Van Lear with Einfeldt suggesting a choice of $s^1$ and $s^2$ of$$\begin{aligned} s^1_{i-1/2} &= \min_p \left( \min \left(\lambda^p_i, \widehat{\lambda}^p_{i-1/2} \right ) \right ) \\ s^2_{i-1/2} &= \max_p \left( \max \left(\lambda^p_{i+1}, \widehat{\lambda}^p_{i-1/2} \right ) \right )\end{aligned}$$where $\lambda^p_j$ is the $p$th eigenvalue of the Jacobian $f'(Q_j)$ and $\widehat{\lambda}^p_{i-1/2}$ is the $p$th eigenvalue of the Roe average values. Note that this choice of speeds reduces to the Roe approximation when the waves chosen are shocks. In the case where these are rarefactions these speeds will take the leading edge of the rarefaction.The fact however that we are only using two waves to represent the full Riemann fan has an obvious disadvantage if you want the details of the Riemann problem to be used. High-Resolution MethodsWe can also extend Godunov's method to the high-resolution methods already discussed and are essentially the same as for linear systems. 
The method we studied already takes the form$$ Q^{n+1}_i = Q^n_i - \frac{\Delta t}{\Delta x} \left( \mathcal{A}^-_{i+1/2} + \mathcal{A}^+ \Delta Q_{i-1/2} \right )- \frac{\Delta t}{\Delta x} \left( \widetilde{F}_{i+1/2} - \widetilde{F}_{i-1/2} \right )$$with$$ \widetilde{F}_{i-1/2} = \frac{1}{2} \sum^{M_w}_{p=1} |s^p_{i-1/2} | \left ( 1- \frac{\Delta t}{\Delta x} |s^p_{i-1/2}| \right) \widetilde{\mathcal{W}}^p_{i-1/2}$$where $\widetilde{\mathcal{W}}^p_{i-1/2}$ is a limited version of $\mathcal{W}^p_{i-1/2}$. There are several complications for nonlinear systems with this approach. For shock waves the general approach still works but if a wave is a rarefaction the definition of the speed is less clear. In practice $$ s^p = \frac{1}{2} (\lambda^p_\ell + \lambda^p_r)$$is often used. The limiters can also be problematic as waves from neighboring grid cells edges may not be collinear so it is not clear that comparing the magnitude of these vectors is not clearly the right thing to do. This similar to variable-coefficient linear systems and can be addressed similarly.In `clawpack` the general approach is to project the neighboring waves onto the wave being limited to obtain a vector that can be directly compared.
###Code
def shock_tube(riemann_solver, efix=False):
    """Sod-type shock-tube problem for the 1D Euler equations.

    riemann_solver: a Python-kernel PyClaw Riemann solver (e.g.
        riemann.euler_1D_py.euler_hllc_1D).
    efix: passed through to the solver via problem_data; whether the
        solver honors it depends on the solver chosen.
    Returns a matplotlib FuncAnimation of density and energy.
    """
    solver = pyclaw.ClawSolver1D(riemann_solver)
    solver.kernel_language = "Python"
    solver.num_waves = 3
    # Solid-wall boundaries on both ends of the tube.
    solver.bc_lower[0] = pyclaw.BC.wall
    solver.bc_upper[0] = pyclaw.BC.wall

    x = pyclaw.Dimension(-1.0, 1.0, 800, name='x')
    domain = pyclaw.Domain([x])
    state = pyclaw.State(domain, 3)

    # Ratio of specific heats
    gamma = 1.4
    state.problem_data['gamma'] = gamma
    state.problem_data['gamma1'] = gamma - 1.0
    state.problem_data['efix'] = efix

    x = state.grid.x.centers
    # High-density/high-pressure gas on the left, low on the right, at rest.
    rho_l = 1.; rho_r = 1./8
    p_l = 1.; p_r = 0.1
    state.q[0, :] = (x < 0.) * rho_l + (x >= 0.) * rho_r
    state.q[1, :] = 0.
    velocity = state.q[1, :] / state.q[0, :]
    pressure = (x < 0.) * p_l + (x >= 0.) * p_r
    # Total energy E = p / (gamma - 1) + rho u^2 / 2 (kinetic term is zero here).
    state.q[2, :] = pressure / (gamma - 1.) + 0.5 * state.q[0, :] * velocity**2

    claw = pyclaw.Controller()
    claw.tfinal = 0.4
    claw.solution = pyclaw.Solution(state, domain)
    claw.solver = solver
    claw.num_output_times = 10
    claw.keep_copy = True
    claw.run()

    fig, axes = plt.subplots(1, 2)
    fig.set_figwidth(fig.get_figwidth() * 2)

    def init():
        # (Re)create the artists so blitting can restore a clean frame.
        density_line, = axes[0].plot(x[0], claw.frames[0].q[0, :][0], 'k')
        axes[0].set_title(r"Density $\rho$")
        axes[0].set_xlim((-1, 1))
        axes[0].set_ylim((-0.1, 1.25))
        energy_line, = axes[1].plot(x[0], claw.frames[0].q[2, :][0], 'k')
        axes[1].set_title(r"Energy $E$")
        axes[1].set_xlim((-1, 1))
        axes[1].set_ylim((-0.1, 4.0))
        return (density_line, energy_line)
    density_line, energy_line = init()

    def fplot(n):
        # Show frame n of the stored solution.
        density_line.set_data([x,], [claw.frames[n].q[0, :]])
        energy_line.set_data([x,], [claw.frames[n].q[2, :]])
        axes[0].set_title(r"$\rho$ at $t = %s$" % claw.frames[n].t)
        axes[1].set_title(r"$E$ at $t = %s$" % claw.frames[n].t)
        return (density_line, energy_line)

    frames_to_plot = range(0, len(claw.frames))
    plt.close(fig)
    return matplotlib.animation.FuncAnimation(fig, fplot, frames=frames_to_plot,
                                              interval=100, blit=True,
                                              init_func=init, repeat=False)

HTML(shock_tube(riemann.euler_1D_py.euler_hllc_1D, efix=True).to_jshtml())

def woodward_colella_blast(riemann_solver):
    """Woodward-Colella interacting blast-wave problem for the 1D Euler
    equations: two pressure spikes in a unit tube with reflecting walls.

    riemann_solver: a Python-kernel PyClaw Riemann solver.
    Returns a matplotlib FuncAnimation of density and energy.
    """
    solver = pyclaw.ClawSolver1D(riemann_solver)
    solver.kernel_language = "Python"
    solver.num_waves = 3
    solver.limiters = 4
    # Solid-wall boundaries so the blast waves reflect and interact.
    solver.bc_lower[0] = pyclaw.BC.wall
    solver.bc_upper[0] = pyclaw.BC.wall

    x = pyclaw.Dimension(0.0, 1.0, 800, name='x')
    domain = pyclaw.Domain([x])
    state = pyclaw.State(domain, 3)

    # Ratio of specific heats
    gamma = 1.4
    state.problem_data['gamma'] = gamma
    state.problem_data['gamma1'] = gamma - 1.0

    x = state.grid.x.centers
    # Uniform density, zero velocity; pressure 1e3 / 1e-2 / 1e2 in the
    # three regions.  E = p / (gamma - 1) since the gas is at rest.
    state.q[0, :] = 1.0
    state.q[1, :] = 0.0
    state.q[2, :] = ( (x < 0.1) * 1.e3
                    + (0.1 <= x) * (x < 0.9) * 1.e-2
                    + (0.9 <= x) * 1.e2 ) / (gamma - 1.0)

    claw = pyclaw.Controller()
    claw.tfinal = 0.05
    claw.solution = pyclaw.Solution(state, domain)
    claw.solver = solver
    claw.num_output_times = 20
    claw.keep_copy = True
    claw.run()

    fig, axes = plt.subplots(1, 2)
    fig.set_figwidth(fig.get_figwidth() * 2)
    axes[0].set_title(r"Density $\rho$")
    axes[0].set_xlim((0, 1))
    axes[0].set_ylim((-0.1, 15.0))
    axes[1].set_title(r"Energy $E$")
    axes[1].set_xlim((0, 1))
    axes[1].set_ylim((-0.1, 2600.0))

    def init():
        # (Re)create the artists so blitting can restore a clean frame.
        density_line, = axes[0].plot(x[0], claw.frames[0].q[0, :][0], 'k')
        axes[0].set_title(r"Density $\rho$")
        axes[0].set_xlim((0, 1))
        axes[0].set_ylim((-0.1, 15.0))
        energy_line, = axes[1].plot(x[0], claw.frames[0].q[2, :][0], 'k')
        axes[1].set_title(r"Energy $E$")
        axes[1].set_xlim((0, 1))
        axes[1].set_ylim((-0.1, 2600.0))
        return (density_line, energy_line)
    density_line, energy_line = init()

    def fplot(n):
        # Show frame n of the stored solution.
        density_line.set_data([x,], [claw.frames[n].q[0, :]])
        energy_line.set_data([x,], [claw.frames[n].q[2, :]])
        axes[0].set_title(r"$\rho$ at $t = %s$" % claw.frames[n].t)
        axes[1].set_title(r"$E$ at $t = %s$" % claw.frames[n].t)
        return (density_line, energy_line)

    frames_to_plot = range(0, len(claw.frames))
    plt.close(fig)
    return matplotlib.animation.FuncAnimation(fig, fplot, frames=frames_to_plot,
                                              interval=100, blit=True,
                                              init_func=init, repeat=False)

# HTML(woodward_colella_blast(riemann.euler_1D_py.euler_roe_1D).to_jshtml())
# HTML(woodward_colella_blast(riemann.euler_1D_py.euler_hll_1D).to_jshtml())
HTML(woodward_colella_blast(riemann.euler_1D_py.euler_hllc_1D).to_jshtml())
###Output
_____no_output_____
###Markdown
Alternative Wave-Propagation Implementations for Approximate Riemann SolversA sometimes useful alternative approach to splitting the jump $Q_i - Q_{i-1}$ into waves is to instead split the jump in the fluxes with$$ f(Q_i) - f(Q_{i-1}) = \sum^{M_w}_{p=1} \mathcal{Z}^p_{i-1/2}.$$This alternative is useful in developing approximate solvers, handling source terms, and showing second order accuracy. The advantage of this approach to linearized solvers is that we are automatically satisfying Roe's condition.
Assuming we have a linearized problem we can then project the jump in fluxes onto the eigenspace$$ f(Q_i) - f(Q_{i-1}) = \sum^m_{p=1} \beta^p_{i-1/2} \widehat{r}^p_{i-1/2}$$and then defining the **f-waves** as$$ \mathcal{Z}^p_{i-1/2} = \beta^p_{i-1/2} \widehat{r}^p_{i-1/2}.$$We can also define the fluctuations as$$\begin{aligned} &\widehat{\mathcal{A}}^- \Delta Q_{i-1/2} = \sum_{p:s^p_{i-1/2} < 0} \mathcal{Z}^p_{i-1/2} \\ &\widehat{\mathcal{A}}^+ \Delta Q_{i-1/2} = \sum_{p:s^p_{i-1/2} > 0} \mathcal{Z}^p_{i-1/2}\end{aligned}$$Implying that Roe's method is satisfied regardless of the linearization employed. For example the arithmetic average defined linearization$$ \widehat{\mathcal{A}}_{i-1/2} = f'\left(\frac{1}{2}(Q_{i} + Q_{i-1})\right)$$will produce a conservative method whereas the original wave-propagation method may not. We can also relate the types of waves, if all speeds $s^p_{i-1/2}$ are nonzero then$$ \mathcal{W}^p_{i-1/2} = \frac{1}{s^p_{i-1/2}} \mathcal{Z}^p_{i-1/2}.$$ The second order correction terms are also slightly different. The flux used should now be$$ \widetilde{F}_{i-1/2} = \frac{1}{2} \sum^{M_w}_{p=1} \text{sgn}(s^p_{i-1/2}) \left( 1- \frac{\Delta t}{\Delta x} |s^p_{i-1/2}| \right) \widetilde{\mathcal{Z}}^p_{i-1/2}.$$
Second-Order Accuracy
One thing we have not yet discussed is whether the method we have proposed is truly second-order accurate for smooth solutions when limiters are not used. We know that the scalar theory does imply this but does it extend to systems? First we must compute the local truncation error keeping in mind we are assuming that the solution is smooth at this time.
We will of course use Taylor series for this and desire to replace some of the terms of $$ q(x_i, t_{n+1}) = q(x_i, t_n) - \Delta t f(q)_x + \frac{\Delta t^2}{2} q(x_i, t_n)_{tt} + \mathcal{O}(\Delta t^3).$$For the conservation law we can compute the second time derivative so that$$\begin{aligned} q_t & = -f(q)_x \\ q_{tt} &= -(f'(q) q_t)_x = [f'(q) f(q)_x]_x\end{aligned}$$implying that$$ q(x_i, t_{n+1}) = q(x_i, t_n) - \Delta t f(q)_x + \frac{\Delta t^2}{2} [f'(q) f(q)_x]_x + \mathcal{O}(\Delta t^3).$$ We now assume that the method uses the f-wave approach, splitting the jump in fluxes$$ f(Q_i) - f(Q_{i-1}) = \sum^m_{p=1} \mathcal{Z}^p_{i-1/2}$$where $\mathcal{Z}^p_{i-1/2}$ are assumed to be eigenvectors of some matrix $\widehat{A}_{i-1/2}(Q_i, Q_{i-1})$. We will also use the definition$$ \mathcal{Z}^p_{i-1/2} = s^p_{i-1/2} \mathcal{W}^p_{i-1/2}$$to relate this to the original wave-propagation method. We must now make a couple of assumption about the consistency of $\widehat{A}_{i-1/2}$ with the Jacobian. This takes the form of$$ \widehat{A}_{i-1/2}(q(x), q(x + \Delta x)) = f'(q(x + \Delta x / 2)) + E(x, \Delta x)$$where the error satisfies$$ E(x, \Delta x) = \mathcal{O}(\Delta x)$$and$$ \frac{E(x + \Delta x, \Delta x) - E(x, \Delta x)}{\Delta x} = \mathcal{O}(\Delta x).$$In the end we then want$$ \widehat{A}(q(x), q(x + \Delta x)) = f'(q(x + \Delta x/2)) + \mathcal{O}(\Delta x^2)$$and therefore we can choose$$ \widehat{A}(Q_{i}, Q_{i-1}) = f'(\widehat{Q}_{i-1/2}).$$Note that this also implies that $\widehat{A}$ need only be a first order accurate approximation to $f'(q)$ at the midpoint. 
Now for the fun part, writing out the update in all of its "glory":$$\begin{aligned} Q^{n+1}_i &= Q^n_i - \frac{\Delta t}{\Delta x} \left[ \sum_{p:s^p_{i-1/2} > 0} \mathcal{Z}^p_{i-1/2} + \sum_{p:s^p_{i-1/2} > 0} \mathcal{Z}^p_{i+1/2} \right] - \frac{\Delta t}{2 \Delta x} \left[ \sum^m_{p=1} \text{sgn}(s^p_{i+1/2}) \left(1 - \frac{\Delta t}{\Delta x} |s^p_{i+1/2}| \right) \mathcal{Z}^p_{i+1/2} - \sum^m_{p=1} \text{sgn}(s^p_{i-1/2}) \left(1 - \frac{\Delta t}{\Delta x} |s^p_{i-1/2}| \right) \mathcal{Z}^p_{i-1/2}\right] \\ &= Q^n_i - \frac{\Delta t}{2 \Delta x} \left[ \sum^m_{p=1} \mathcal{Z}^p_{i-1/2} + \sum_{p=1} \mathcal{Z}^p_{i+1/2} \right] + \frac{\Delta t^2}{2 \Delta x^2} \left[\sum_{p=1} s^p_{i+1/2} \mathcal{Z}^p_{i+1/2} - \sum^m_{p=1} s^p_{i-1/2} \mathcal{Z}^p_{i-1/2} \right] \\ &= Q^n_i - \frac{\Delta t}{2 \Delta x} \left[ \sum^m_{p=1} \mathcal{Z}^p_{i-1/2} + \sum_{p=1} \mathcal{Z}^p_{i+1/2} \right] + \frac{\Delta t^2}{2 \Delta x^2} \left[ \widehat{A}_{i+1/2} \sum_{p=1} \mathcal{Z}^p_{i+1/2} - \widehat{A}_{i-1/2} \sum^m_{p=1}\mathcal{Z}^p_{i-1/2} \right].\end{aligned}$$ Using the continuity assumption then leads to $$ Q^{n+1}_i = Q^n_i - \frac{\Delta t}{2 \Delta x} [f(Q_{i+1}) - f(Q_{i-1})] - \frac{\Delta t^2}{2 \Delta x^2} \left \{ \widehat{A}_{i+1/2} [f(Q_{i+1}) - f(Q_i)] - \widehat{A}_{i-1/2} [f(Q_i) - f(Q_{i-1})] \right \},$$which agrees with the Taylor series$$ q(x_i, t_{n+1}) = q(x_i, t_n) - \Delta t f(q)_x + \frac{\Delta t^2}{2} [f'(q) f(q)_x]_x + \mathcal{O}(\Delta t^3).$$to the required accuracy. Total Variation for SystemsAs mentioned previously the notion of TV-stability cannot naturally be extended to systems and therefore there is no proof that even Godunov's method converges for general systems of nonlinear conservation laws. The situation is actually worse than that, in general there is no proof of an existence of a solution for general nonlinear systems. 
It bears therefore some merit in delving into what has been done and why TV-stability fails in this situation. One might try to define TV for a system as the following:$$ TV(q) = \sup \sum^N_{j=1} ||q(\xi_j) - q(\xi_{j-1})||$$with some arbitrary discretization of the domain. If we restrict our attention to piecewise constant grid functions on the full real-line then this reduces to$$ TV(Q) = \sum^\infty_{i=-\infty} ||Q_i - Q_{i-1}||.$$We could hope that if the above definitions hold that we could prove a similar type of stability and therefore convergence.However we run into a problem as the true solution itself is not TVD. In fact we can choose initial conditions that can cause the TV(Q) to arbitrarily grow (but finite). ###Code def swe_rp(h, u=[0.0, 0.0], N=100, ylimits=((0.0, 3.5), (-0.5, 2))): solver = pyclaw.ClawSolver1D(riemann.shallow_1D_py.shallow_fwave_1d) solver.kernel_language = "Python" solver.num_waves = 2 solver.num_eqn = 2 solver.fwave = True solver.limiters = [pyclaw.limiters.tvd.MC, pyclaw.limiters.tvd.MC] solver.bc_lower[0] = pyclaw.BC.extrap solver.bc_upper[0] = pyclaw.BC.extrap solver.aux_bc_lower[0] = pyclaw.BC.extrap solver.aux_bc_upper[0] = pyclaw.BC.extrap x = pyclaw.Dimension(-5.0, 5.0, N, name='x') domain = pyclaw.Domain(x) state = pyclaw.State(domain, 2, 1) xc = domain.grid.x.centers state.q[0,:] = h[0] * (xc < 0) * numpy.ones(xc.shape) + h[1] * (xc >= 0) * numpy.ones(xc.shape) state.q[1,:] = u[0] * (xc < 0) * numpy.ones(xc.shape) + u[1] * (xc >= 0) * numpy.ones(xc.shape) state.q[1,:] *= state.q[0, :] state.aux[0, :] = numpy.zeros(xc.shape) state.problem_data['grav'] = 1.0 state.problem_data['dry_tolerance'] = 1e-3 state.problem_data['sea_level'] = 0.0 claw = pyclaw.Controller() claw.tfinal = 2.0 claw.num_output_times = 10 claw.solution = pyclaw.Solution(state,domain) claw.solver = solver claw.keep_copy = True claw.run() x = claw.frames[0].grid.dimensions[0].centers fig, axes = plt.subplots(1, 2) fig.set_figwidth(fig.get_figwidth() * 2) 
axes[0].set_xlim((x[0], x[-1])) axes[0].set_ylim(ylimits[0]) axes[0].set_title(r"$h$") axes[1].set_xlim((x[0], x[-1])) axes[1].set_ylim(ylimits[1]) axes[1].set_title(r"$hu$") def init(): axes[0].set_xlim((x[0], x[-1])) axes[0].set_ylim(ylimits[0]) h_line, = axes[0].plot(x[0], claw.frames[0].q[0, :][0], 'bo-') axes[1].set_xlim((x[0], x[-1])) axes[1].set_ylim(ylimits[1]) hu_line, = axes[1].plot(x[0], claw.frames[0].q[1, :][0], 'bo-') return (h_line, hu_line) h_line, hu_line = init() def fplot(n): h_line.set_data([x,], [claw.frames[n].q[0, :]]) hu_line.set_data([x,], [claw.frames[n].q[1, :]]) axes[0].set_title(r"$h$ at $t = %s$" % claw.frames[n].t) axes[1].set_title(r"$hu$ at $t = %s$" % claw.frames[n].t) return (h_line, hu_line) frames_to_plot = range(0, len(claw.frames)) plt.close(fig) return matplotlib.animation.FuncAnimation(fig, fplot, frames=frames_to_plot, interval=100, blit=True, init_func=init, repeat=False) HTML(swe_rp(h=[1, 1], u=[1, -1], ylimits=((0, 2.6), (-1.1, 1.1))).to_jshtml()) ###Output _____no_output_____
Assignment 6.ipynb
###Markdown 5.a ###Code
def f(x):
    """Standard normal probability density: exp(-x^2/2) / sqrt(2*pi).

    Accepts a scalar or a NumPy array (vectorized via numpy ufuncs).
    """
    return (1/np.sqrt(2*np.pi)) * (np.exp(-np.square(x)/2))
# Generate random numbers
randoms_40000_1 = np.random.uniform(low=0, high=1, size=(40000))
randoms_40000_2 = np.random.uniform(low=0, high=1, size=(40000))
randoms_40000_3 = np.random.uniform(low=0, high=1, size=(40000))
# Calculate f
f_randoms_40000_1 = f(randoms_40000_1)
f_randoms_40000_2 = f(randoms_40000_2)
f_randoms_40000_3 = f(randoms_40000_3)
# Calculate means (Monte-Carlo estimate of the integral of f over [0, 1])
I_40000_1 = np.mean(f_randoms_40000_1)
I_40000_2 = np.mean(f_randoms_40000_2)
I_40000_3 = np.mean(f_randoms_40000_3)
# Print values
print(f"I_40000_1 = {I_40000_1}")
print(f"I_40000_2 = {I_40000_2}")
print(f"I_40000_3 = {I_40000_3}")
###Output
I_40000_1 = 0.3410851456229389
I_40000_2 = 0.34130154668424395
I_40000_3 = 0.34155179379915684
###Markdown 5.c ###Code
# Generate random points
x1 = np.random.uniform(low=0, high=1, size=(40000))
y1 = np.random.uniform(low=0, high=1, size=(40000))
x2 = np.random.uniform(low=0, high=1, size=(40000))
y2 = np.random.uniform(low=0, high=1, size=(40000))
x3 = np.random.uniform(low=0, high=1, size=(40000))
y3 = np.random.uniform(low=0, high=1, size=(40000))
# Calculate proportion with y <= f(x)
# BUGFIX: trials 2 and 3 previously compared y1 (the first trial's y-samples)
# against f(x2) / f(x3); each trial must use its own y-samples.
num_y_less_than_fx1 = np.where(y1 <= f(x1))[0].size
num_y_less_than_fx2 = np.where(y2 <= f(x2))[0].size
num_y_less_than_fx3 = np.where(y3 <= f(x3))[0].size
proportion_y_less_than_fx1 = num_y_less_than_fx1 / x1.size
proportion_y_less_than_fx2 = num_y_less_than_fx2 / x2.size
proportion_y_less_than_fx3 = num_y_less_than_fx3 / x3.size
# Print values
print(f"proportion_y_less_than_fx1 = {proportion_y_less_than_fx1}")
print(f"proportion_y_less_than_fx2 = {proportion_y_less_than_fx2}")
print(f"proportion_y_less_than_fx3 = {proportion_y_less_than_fx3}")
###Output
proportion_y_less_than_fx1 = 0.34635
proportion_y_less_than_fx2 = 0.347375
proportion_y_less_than_fx3 = 0.3459
###Markdown Omphemetse Mangope Advanced Machine Learning Assignment 6: Training Neural net using Negative Loglikelihood Due
Date: 17 June 2020 ###Code import matplotlib.pyplot as plt import pandas as pd from sklearn.preprocessing import scale from sklearn.model_selection import train_test_split import numpy as np data = pd.read_csv('weightdataset.csv',sep=';') data.head(2) x = scale(data.Weight) # Data scaling y = scale(data.Height) # Data Scaling w_input = np.random.random(2) # Randomly assigning weights x_train, x_test,y_train, y_test = train_test_split(x,y, test_size=0.3) # Splitting data into 30% test and 70% train ###Output _____no_output_____ ###Markdown Functions ###Code def des(x): # Design matrix for x values n = len(x) X = np.c_[np.ones(n), x] return(X) #Please note: Since the bias term is 1, it is treated as an x values in this case which, #is just ones throughou the whole observations def hidden(p,w_in): # hidden node computation h = p.dot(w_in) return(h) def sigmoid(x): # Sigmoid Function return(1/(1 + np.exp(-x))) def der_sigmoid(x): return(sigmoid(x) * (1 - sigmoid(x))) def y_w(x,w): # Derivative of y_pred with respect to weights return((1/(1 + np.exp(-x*w)))*(1/(1 + np.exp(-x*w))) * x) X_train = des(x_train) # Fitting training data to design Matrix X_test = des(x_test) # Fitting testing data to design matrix u11 = hidden(X_train,w_input) # Calculating the output of the hidden without sigmoid o11 = sigmoid(X_train) # Output of the sigmoid function, note: 011=y_pred derivative = y_w(X_test,w_input) # Derivative of y_pred with respect to w # Sigmoid function has been used to squash the values of y_train to lie between 0 and 1 # Therefore, the data shows that the data solution lies between zero and therefore, the solution can be chosen between 0 and 1 plt.figure(figsize=(14,6)) plt.plot(X_train, o11, 'b*') plt.title("Output values of train data fitted to sigmoid function") plt.xlabel('x') plt.ylabel('y') plt.show() epochs = 100 # Number of iterations lr = 0.001 # learning rate y_pred = sigmoid(X_test) # prediction def optimize(y_i,y_pred,w,x,epochs,lr, derivative): cost = [] 
weights = [] nll = 0 n = len(x) for i in range(epochs): y_i = y_i.reshape(len(y_i),1) # reshaping y values to be to have n x 1 dimensions nll = -np.sum((y_i *np.log(w) + (1 - y_i)*np.log(1 - w))) # negative Loglikelihood cost function err = derivative * nll # Backpropagation. derivative of sigmoid is defined under functions section above cost.append(err[-1]) w = w - (lr*(1/n)*sum(y_pred - y_i)) weights.append(w) return(cost, weights) error,updated_weights = optimize(y_test,y_pred,w_input,X_test,epochs,lr,der_sigmoid(X_test)) weights = updated_weights[-1] weights # weights updated plt.figure(figsize=(14,6)) plt.plot(error, color = 'r') plt.title('Negative loglikelihood Cost') plt.xlabel('Number of iterations') plt.ylabel('Cost') plt.show() ###Output _____no_output_____ ###Markdown Mean life expectancy of Asian countries ###Code Asian.lifeExp.mean() fig=px.box(Asian, y='lifeExp') fig.show() ###Output _____no_output_____ ###Markdown Deviation in GDP of each country in europe and south America ###Code data.std() Gdp_dev=data.groupby(['country','continent'])['gdpPercap'].std().reset_index() EuropeGdp=Gdp_dev[Gdp_dev['continent']=='Europe'] fig=px.line(EuropeGdp, y='gdpPercap', x='country', title="EUROPE STANDARD DEVIATION GDP PER CAP ") fig.show() Gdp_dev=data.groupby(['country','continent'])['gdpPercap'].std().reset_index() EuropeGdp=Gdp_dev[Gdp_dev['continent']=='Americas'] fig=px.line(EuropeGdp, y='gdpPercap', x='country', title="Americas STANDARD DEVIATION GDP PER CAP ") fig.show() ###Output _____no_output_____ ###Markdown Change in population of the African countries in 3 decades ###Code data.dtypes population=data[data['continent']=='Africa'][['year','country','pop']] population population=population[(population['year']==1987) | (population['year']==1997) | (population['year']==2007)] population fig=px.bar(population, y='pop', x='country', color='year') fig.update_layout( autosize=False, width=1000, height=800,) fig.show() ###Output _____no_output_____ ###Markdown 
Import Modules ###Code import requests import pandas as pd website_url = requests.get("https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M").text ###Output _____no_output_____ ###Markdown get website data ###Code from bs4 import BeautifulSoup soup = BeautifulSoup(website_url,"lxml") ###Output _____no_output_____ ###Markdown iterate through table rows ###Code pcs = [] prevP = "" currN = "" prevB = "" for table_row in soup.select("table.wikitable tr"): cells = table_row.findAll('td') if len(cells) > 0: pc = cells[0].text.strip() b = cells[1].text.strip() n = cells[2].text.strip() if ((b == "Not assigned" and n=="Not assigned" or b == "blank")): a="skipping" else: if (n =="Not Assigned"): n = b; #neighbourhood becomes borough if (pc == prevP): currN = currN + "," + n #seperate if more than one with comma else: if (prevP != ""): pcs.append([prevP,prevB,currN]) prevP = pc prevB = b currN = n pcs.append([prevP,prevB,currN]) df = pd.DataFrame(pcs,columns=['Postal Code','Borough','Neighborhood']).sort_values(by=['Postal Code']) ###Output _____no_output_____ ###Markdown show first 5 enties ###Code df.head() df.shape import pandas as pd import requests import csv import pandas as pd import io import requests url="http://cocl.us/Geospatial_data" s=requests.get(url).content df2=pd.read_csv(io.StringIO(s.decode('utf-8'))) df2.head() df3 = pd.merge(df, df2, on="Postal Code") import folium import numpy as np from sklearn.cluster import KMeans import matplotlib.cm as cm import matplotlib.colors as colors from geopy.geocoders import Nominatim import datetime from pandas.io.json import json_normalize %matplotlib inline data = df3[df3['Borough'].str.contains('Toronto', regex = False)].reset_index(drop=True) data.head() col_names = ['Postal Code','Borough','Neighborhood','Latitude','Longitude'] toronto_neigh = pd.DataFrame(columns = col_names) #toronto_neigh for i in range(data.shape[0]): postcode = data.loc[i, 'Postal Code'] borough = data.loc[i, 'Borough'] lat = 
data.loc[i, 'Latitude'].astype(float) lng = data.loc[i, 'Longitude'].astype(float) neigh = data.loc[i,'Neighborhood'].split(", ") for j in range(len(neigh)): toronto_neigh = toronto_neigh.append(pd.DataFrame(np.array([[postcode, borough, neigh[j], lat, lng]]), columns = col_names)) toronto_neigh = toronto_neigh.reset_index(drop = True) toronto_neigh.head() # create folium map toronto_map = folium.Map(location = [lat, lng], zoom_start = 11 ) for lat, lng, borough, neighborhood in zip(toronto_neigh['Latitude'], toronto_neigh['Longitude'], toronto_neigh['Borough'],toronto_neigh['Neighborhood']): label = '{}, {} ({}, {})'.format(neighborhood, borough, lat, lng) label = folium.Popup(label, parse_html= True) folium.CircleMarker([float(lat),float(lng)], radius = 3, popup = label, color = 'red', fill = True, fill_color = '#a72920', fill_opacity = 0.5, parse_html = False).add_to(toronto_map) display(toronto_map) ###Output _____no_output_____ ###Markdown api call data ###Code now = datetime.datetime.now() date = "%4d%02d%02d" % (now.year, now.month, now.day) CLIENT_ID = '3ECJQTXHODVLXC0PN5LT5NM2ABWKXK4YORSKACOYAQ1RBOU1' # Foursquare ID CLIENT_SECRET = '0XCMHV3VM5B3MDYANVU20ARUNNHL2LOPJ0DZNQYOYSJWTZ41' # Foursquare Secret print('Your credentails:') print('CLIENT_ID: ' + CLIENT_ID) print('CLIENT_SECRET:' + CLIENT_SECRET) VERSION = date i = 0 latitude = toronto_neigh.loc[i, 'Latitude'] # neighborhood latitude value longitude = toronto_neigh.loc[i, 'Longitude'] # neighborhood longitude value neighborhood_name = toronto_neigh.loc[i, 'Neighborhood'] # neighborhood name radius = 500 limit = 100 url = "https://api.foursquare.com/v2/venues/search?client_id={}&client_secret={}&ll={},{}&v={}&radius={}&limit={}".format( CLIENT_ID, CLIENT_SECRET, latitude, longitude, VERSION, radius, limit) results = requests.get(url).json() def get_category_type(row): try: categories_list = row['categories'] except: categories_list = row['venue.categories'] if len(categories_list) == 0: return None 
else: return categories_list[0]['name'] ###Output _____no_output_____ ###Markdown call for venue data from neighborhoods ###Code venues = results['response']['venues'] nearby_venues = json_normalize(venues) # filter columns filtered_columns = ['name', 'categories', 'location.lat', 'location.lng'] nearby_venues =nearby_venues.loc[:, filtered_columns] nearby_venues['categories'] = nearby_venues.apply(get_category_type, axis=1) nearby_venues.columns = [col.split(".")[-1] for col in nearby_venues.columns] def getNearbyVenues(names, latitudes, longitudes, radius=500, LIMIT = 100): venues_list=[] for name, lat, lng in zip(names, latitudes, longitudes): print(name) url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format( CLIENT_ID, CLIENT_SECRET, VERSION, lat, lng, radius, LIMIT) results = requests.get(url).json()["response"]['groups'][0]['items'] # return only relevant information for each nearby venue venues_list.append([( name, lat, lng, v['venue']['name'], v['venue']['location']['lat'], v['venue']['location']['lng'], v['venue']['categories'][0]['name']) for v in results]) nearby_venues = pd.DataFrame([item for venue_list in venues_list for item in venue_list]) nearby_venues.columns = ['Neighborhood', 'Neighborhood Latitude', 'Neighborhood Longitude', 'Venue', 'Venue Latitude', 'Venue Longitude', 'Venue Category'] return(nearby_venues) ###Output _____no_output_____ ###Markdown get venue data ###Code toronto_venues = getNearbyVenues(names=toronto_neigh['Neighborhood'], latitudes=toronto_neigh['Latitude'], longitudes=toronto_neigh['Longitude'] ) toronto_onehot = pd.get_dummies(toronto_venues[['Venue Category']], prefix="", prefix_sep="") toronto_onehot['Neighborhood'] = toronto_venues['Neighborhood'] #neighborhood column to the first column fixed_columns = [toronto_onehot.columns[-1]] + list(toronto_onehot.columns[:-1]) toronto_onehot = toronto_onehot[fixed_columns] toronto_grouped = 
toronto_onehot.groupby(['Neighborhood']).mean().reset_index() def return_most_common_venues(row, num_top_venues): row_categories = row.iloc[1:] row_categories_sorted = row_categories.sort_values(ascending=False) return row_categories_sorted.index.values[0:num_top_venues] ###Output _____no_output_____ ###Markdown pre process data ready for kcluster ###Code num_top_venues = 5 indicators = ['st', 'nd', 'rd'] # create columns according to number of top venues columns = [] for ind in np.arange(num_top_venues): try: columns.append('{}{} Most Common Venue'.format(ind+1, indicators[ind])) except: columns.append('{}th Most Common Venue'.format(ind+1)) # create a new dataframe neighborhoods_venues_sorted = pd.DataFrame(columns=columns) neighborhoods_venues_sorted['Neighborhood'] = toronto_grouped['Neighborhood'] fixed_columns = [neighborhoods_venues_sorted.columns[-1]] + list(neighborhoods_venues_sorted.columns[:-1]) neighborhoods_venues_sorted = neighborhoods_venues_sorted[fixed_columns] for ind in np.arange(toronto_grouped.shape[0]): neighborhoods_venues_sorted.iloc[ind, 1:] = return_most_common_venues(toronto_grouped.iloc[ind, :], num_top_venues) # set number of clusters kclusters = 5 toronto_grouped_clustering = toronto_grouped.drop('Neighborhood', 1) #cluster kmeans = KMeans(n_clusters=kclusters, random_state=0).fit(toronto_grouped_clustering) neighborhoods_venues_sorted.insert(0, 'Cluster Labels', kmeans.labels_) toronto_merged = toronto_neigh # merge toronto_grouped with toronto_data to add latitude/longitude for each neighborhood toronto_merged = toronto_merged.join(neighborhoods_venues_sorted.set_index('Neighborhood'), on='Neighborhood') ###Output _____no_output_____ ###Markdown map data with clusters shown ###Code map_clusters = folium.Map(location=[latitude, longitude], zoom_start=11) x = np.arange(kclusters) ys = [i + x + (i*x)**2 for i in range(kclusters)] colors_array = cm.rainbow(np.linspace(0, 1, len(ys))) rainbow = [colors.rgb2hex(i) for i in colors_array] 
markers_colors = [] for lat, lon, poi, cluster in zip(toronto_merged['Latitude'], toronto_merged['Longitude'], toronto_merged['Neighborhood'], toronto_merged['Cluster Labels']): label = folium.Popup(str(poi) + ' Cluster ' + str(cluster), parse_html=True) folium.CircleMarker( [float(lat), float(lon)], radius=5, popup=label, color=rainbow[cluster-1], fill=True, fill_color=rainbow[cluster-1], fill_opacity=0.7).add_to(map_clusters) map_clusters ###Output _____no_output_____ ###Markdown 1. Map The mean life expectancy of all the Asian countries according to the data. ###Code asian_life_expectancy=Asian.groupby(["country",'iso_alpha'])["lifeExp"].mean().reset_index() asian_life_expectancy.head() px.choropleth(asian_life_expectancy, locations="iso_alpha",color='lifeExp', color_continuous_scale="Viridis",scope="asia",hover_name="country") ###Output _____no_output_____ ###Markdown 2. Deviation in GDP of each country in Europe and South America. ###Code Eurosa=df[(df.continent=="Europe") |(df.continent=="South America")] Eurosa.head() dev=Eurosa.groupby(["country",'iso_alpha'])["gdpPercap"].std().reset_index() dev.head() px.choropleth(dev, locations="iso_alpha",color='gdpPercap', color_continuous_scale="Viridis",scope="europe",hover_name="country") ###Output _____no_output_____ ###Markdown 3. The change in population of each African country in the last 3 decades. 
###Code df.year.unique() df.continent.unique() Africathen=df[(df.continent=="Africa")& (df.year==1977)] Africathen.head() Africanow=df[(df.continent=="Africa")& (df.year==2007)] Africanow.head() pop=Africanow[["pop","country"]] popd=pop.rename(columns={"pop":"pop_2007"}) popd.head() popchange=Africathen[["pop","country","iso_alpha"]] popchange.head() pchange=pd.merge(popd,popchange,how="inner",on="country") pchange.head() pchange["change"]=(pchange["pop_2007"]-pchange["pop"])*100/pchange["pop_2007"] pchange.head() px.choropleth(pchange, locations="iso_alpha",color='change', color_continuous_scale="Viridis",scope="africa",hover_name="country") ###Output _____no_output_____ ###Markdown ###Code createdFile = open("FCS.txt", 'w') createdFile.write("ABC"); createdFile.close(); try: file = open("FCS.txt",'r') file.write("ABC") except Exception as e: print("The file gave us error - ", e) file = open("FCS.txt",'r') print(file.read()) finally: file.close() # ques 2 - unit test given number is it Prime or not %%writefile Is_PrimeNumber.py def isPrimeNumber(numberToCheck): return (numberToCheck<=3) or ( (numberToCheck%2 != 0) and (numberToCheck%3 != 0)) %%writefile unittestof_Prime.py import unittest import Is_PrimeNumber class TestPrime(unittest.TestCase): def testprime_withPrime(self): result= Is_PrimeNumber.isPrimeNumber(31) self.assertEqual(result, True) def testprime_withNonPrime(self): result= Is_PrimeNumber.isPrimeNumber(4) self.assertEqual(result, False) if __name__ == '__main__': unittest.main() ! python unittestof_Prime.py ###Output .. 
----------------------------------------------------------------------
Ran 2 tests in 0.000s

OK
###Markdown
ASSIGNMENT 6 Question 1 Program to create a class name bank_account
###Code
class bank_account():
    """A minimal interactive bank account.

    NOTE(review): deposit() and withdraw() read amounts via input(), so the
    class is only usable in an interactive session.
    """
    def __init__(self,ownerName,balance):
        # Account holder's name and the opening balance.
        self.ownerName = ownerName
        self.balance = balance
    def deposit(self):
        """Prompt for an amount, add it to the balance and print the total."""
        deposit_amt = input("Please enter the amount you want to DEPOSIT : ")
        deposit_amt = int(deposit_amt)  # input() returns str; convert first
        self.balance= self.balance+deposit_amt
        print("Amount is deposited !")
        print("Your total balance is ",self.balance)
    def withdraw(self):
        """Prompt for an amount and withdraw it when strictly below the balance.

        NOTE(review): the strict `<` comparison means the full balance can
        never be withdrawn -- confirm whether `<=` was intended.
        """
        withdrawal_amt= input("Please enter the amount you want to WITHDRAWAL : ")
        withdrawal_amt = int(withdrawal_amt)
        if withdrawal_amt < self.balance :
            self.balance = self.balance-withdrawal_amt
            print("Withdrawal Successful!")
            print("Remainig Balance is ",self.balance)
        else:
            print("You don't have sufficent balance to withdraw.")
A001 = bank_account("John",45000)
A001.deposit()
A001.withdraw()
A001.deposit()
A001.withdraw()
###Output
Please enter the amount you want to WITHDRAWAL : 49000
Withdrawal Successful!
Remainig Balance is 50000
###Markdown
Question 2 Program to create cone
###Code
import math
class cone :
    """Right circular cone defined by its base radius and height."""
    def __init__(self,radius,height):
        # Base radius and perpendicular height of the cone.
        self.radius = radius
        self.height = height
    def surfacearea(self):
        """Print the base area and the lateral (conical) surface area."""
        # Base area = pi * r^2
        base = (math.pi)*(math.pow(self.radius,2))
        # Lateral area = pi * r * slant height, where slant = sqrt(r^2 + h^2)
        conical_surface =(math.pi)*self.radius*(math.sqrt((math.pow(self.radius,2))+(math.pow(self.height,2))))
        print("Surface area of cone is-")
        print("Base = ",base,"sq.units")
        print("Conical Surface = ",conical_surface,"sq.units")
    def volume(self):
        """Print the volume, pi * r^2 * h / 3."""
        volume =(math.pi)*(math.pow(self.radius,2))*self.height/3
        print("Volume of cone = ",volume,"cubic units")
cone1 = cone(5,6)
cone1.surfacearea()
cone1.volume()
cone2 = cone(7,5)
cone2.volume()
cone2.surfacearea()
###Output
Volume of cone = 256.56340004316644 cubic units
Surface area of cone is-
Base = 153.93804002589985 sq.units
Conical Surface = 189.1750130391168 sq.units
notebooks/classification_notebook.ipynb
###Markdown Load an experimental data ###Code # Load an example dataset from sklearn.datasets import load_breast_cancer dataset = load_breast_cancer() X = dataset.data y = dataset.target feature_names = dataset.feature_names target_names = dataset.target_names ###Output _____no_output_____ ###Markdown Cross-validate an example classifier ###Code from helpers.classification.validation import cross_validate_classifier from sklearn.linear_model import LogisticRegression # Initialize the classifier classifier = LogisticRegression(random_state=seed, solver="lbfgs") # Define the classification options threshold = 0.5 metrics = ("acc", "sen", "spe") num_folds = 10 num_repetitions = 20 # Cross-validate the classifier results = cross_validate_classifier(X, y, classifier, threshold=threshold, metrics=metrics, num_folds=num_folds, num_repetitions=num_repetitions, seed=seed) print("-------------------------") print("Cross-validation results:") print("-------------------------") print("") for metric in metrics: metric_avg = float(np.mean(results[metric])) metric_std = float(np.std(results[metric])) print("{} = {:.2f} +- {:.2f}".format(metric, metric_avg, metric_std)) ###Output ------------------------- Cross-validation results: ------------------------- acc = 0.94 +- 0.00 sen = 0.96 +- 0.00 spe = 0.91 +- 0.01 ###Markdown Plot the classification graphs ###Code from helpers.classification.visualization import plot_classification from sklearn.linear_model import LogisticRegression # Initialize the classifier classifier = LogisticRegression(random_state=seed, solver="lbfgs") # Get example feature X_dim1 = X[:, 0] X_dim2 = X[:, 1] X_dim1_label = feature_names[0] X_dim2_label = feature_names[1] # Make sure X, y are 2-dimensional X_temp = X_dim1.reshape((len(X_dim1), 1)) y_temp = y.reshape((len(y), 1)) # Fit the classifier classifier.fit(X_temp, y_temp) # Evaluate the classifier y_hat = classifier.predict(X_temp) # Plot the classification graph plot_classification(X_dim1, X_dim2, 
y_temp, y_hat, metrics=("acc", "sen", "spe"), fig_size=(12, 5), fig_show=False, save_as=None, x_label=X_dim1_label, y_label=X_dim2_label) plt.savefig("classification_plots.png", bbox_inches="tight") plt.show() ###Output _____no_output_____ ###Markdown Load an experimental data ###Code # Load an example dataset from sklearn.datasets import load_breast_cancer dataset = load_breast_cancer() X = dataset.data y = dataset.target feature_names = dataset.feature_names target_names = dataset.target_names ###Output _____no_output_____ ###Markdown Cross-validate an example classifier ###Code from helpers.classification.validation import cross_validate_classifier from sklearn.linear_model import LogisticRegression # Initialize the classifier classifier = LogisticRegression(random_state=seed, solver="lbfgs") # Define the classification options threshold = 0.5 metrics = ("acc", "sen", "spe") num_folds = 10 num_repetitions = 20 # Cross-validate the classifier results = cross_validate_classifier(X, y, classifier, threshold=threshold, metrics=metrics, num_folds=num_folds, num_repetitions=num_repetitions, seed=seed) print("-------------------------") print("Cross-validation results:") print("-------------------------") print("") for metric in metrics: metric_avg = float(np.mean(results[metric])) metric_std = float(np.std(results[metric])) print("{} = {:.2f} +- {:.2f}".format(metric, metric_avg, metric_std)) ###Output ------------------------- Cross-validation results: ------------------------- acc = 0.94 +- 0.02 sen = 0.96 +- 0.04 spe = 0.91 +- 0.07 ###Markdown Plot the classification graphs ###Code from helpers.classification.visualization import plot_classification from sklearn.linear_model import LogisticRegression # Initialize the classifier classifier = LogisticRegression(random_state=seed, solver="lbfgs") # Get example feature X_dim1 = X[:, 0] X_dim2 = X[:, 1] X_dim1_label = feature_names[0] X_dim2_label = feature_names[1] # Make sure X, y are 2-dimensional X_temp = 
X_dim1.reshape((len(X_dim1), 1)) y_temp = y.reshape((len(y), 1)) # Fit the classifier classifier.fit(X_temp, y_temp) # Evaluate the classifier y_hat = classifier.predict(X_temp) # Plot the classification graph plot_classification(X_dim1, X_dim2, y_temp, y_hat, metrics=("acc", "sen", "spe"), fig_size=(12, 5), fig_show=True, x_label=X_dim1_label, y_label=X_dim2_label) ###Output _____no_output_____
module3-cross-validation/DS_223_assignment.ipynb
###Markdown Lambda School Data Science*Unit 2, Sprint 2, Module 3*--- Cross-Validation Assignment- [ ] [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [ ] Continue to participate in our Kaggle challenge. - [ ] Use scikit-learn for hyperparameter optimization with RandomizedSearchCV.- [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.)- [ ] Commit your notebook to your fork of the GitHub repo.**You can't just copy** from the lesson notebook to this assignment.- Because the lesson was **regression**, but the assignment is **classification.**- Because the lesson used [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html), which doesn't work as-is for _multi-class_ classification.So you will have to adapt the example, which is good real-world practice.1. Use a model for classification, such as [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)2. Use hyperparameters that match the classifier, such as `randomforestclassifier__ ...`3. Use a metric for classification, such as [`scoring='accuracy'`](https://scikit-learn.org/stable/modules/model_evaluation.htmlcommon-cases-predefined-values)4. 
If you're doing a multi-class classification problem — such as whether a waterpump is functional, functional needs repair, or nonfunctional — then use a categorical encoding that works for multi-class classification, such as [OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html) (not [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html)) Stretch Goals Reading- Jake VanderPlas, [Python Data Science Handbook, Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html), Hyperparameters and Model Validation- Jake VanderPlas, [Statistics for Hackers](https://speakerdeck.com/jakevdp/statistics-for-hackers?slide=107)- Ron Zacharski, [A Programmer's Guide to Data Mining, Chapter 5](http://guidetodatamining.com/chapter5/), 10-fold cross validation- Sebastian Raschka, [A Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb)- Peter Worcester, [A Comparison of Grid Search and Randomized Search Using Scikit Learn](https://blog.usejournal.com/a-comparison-of-grid-search-and-randomized-search-using-scikit-learn-29823179bc85) Doing- Add your own stretch goals!- Try other [categorical encodings](https://contrib.scikit-learn.org/categorical-encoding/). See the previous assignment notebook for details.- In addition to `RandomizedSearchCV`, scikit-learn has [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html). Another library called scikit-optimize has [`BayesSearchCV`](https://scikit-optimize.github.io/notebooks/sklearn-gridsearchcv-replacement.html). 
Experiment with these alternatives.- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines? BONUS: Stacking!Here's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)``` ###Code %%capture import sys # If you're on Colab: if 'google.colab' in sys.modules: DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/' !pip install category_encoders==2.* # If you're working locally: else: DATA_PATH = '../data/' import pandas as pd import numpy as np # Merge train_features.csv & train_labels.csv train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'), 
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv')).set_index('id') # Read test_features.csv & sample_submission.csv test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv') sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv') ###Output _____no_output_____ ###Markdown Wrangle DataImport Data ###Code import pandas as pd import numpy as np from sklearn.model_selection import train_test_split train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'), pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv')) test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv', index_col='id') train.head(3) ###Output _____no_output_____ ###Markdown EDA ###Code # from pandas_profiling import ProfileReport # profile = ProfileReport(train, minimal=True).to_notebook_iframe() # profile train.info() ###Output <class 'pandas.core.frame.DataFrame'> Int64Index: 59400 entries, 0 to 59399 Data columns (total 41 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 id 59400 non-null int64 1 amount_tsh 59400 non-null float64 2 date_recorded 59400 non-null object 3 funder 55765 non-null object 4 gps_height 59400 non-null int64 5 installer 55745 non-null object 6 longitude 59400 non-null float64 7 latitude 59400 non-null float64 8 wpt_name 59400 non-null object 9 num_private 59400 non-null int64 10 basin 59400 non-null object 11 subvillage 59029 non-null object 12 region 59400 non-null object 13 region_code 59400 non-null int64 14 district_code 59400 non-null int64 15 lga 59400 non-null object 16 ward 59400 non-null object 17 population 59400 non-null int64 18 public_meeting 56066 non-null object 19 recorded_by 59400 non-null object 20 scheme_management 55523 non-null object 21 scheme_name 31234 non-null object 22 permit 56344 non-null object 23 construction_year 59400 non-null int64 24 extraction_type 59400 non-null object 25 extraction_type_group 59400 non-null object 26 extraction_type_class 59400 non-null object 27 management 
def wrangle(X, max_cardinality=100):
    """Clean a waterpump feature frame for modeling.

    Operates on a copy; the input frame is left untouched.

    Parameters
    ----------
    X : pandas.DataFrame
        Raw train or test features (must contain the latitude/longitude
        and known-redundant columns dropped below).
    max_cardinality : int, default 100
        Categorical (object-dtype) columns with more unique values than
        this are dropped, since they explode one-hot/ordinal encodings.

    Returns
    -------
    pandas.DataFrame
        Copy of ``X`` with placeholder coordinates converted to NaN and
        redundant / high-cardinality columns removed.
    """
    X = X.copy()

    # The source data encodes missing latitude as the near-zero value
    # -2e-08; normalize it to 0 so the 0 -> NaN replacement below catches it.
    X['latitude'] = X['latitude'].replace(-2e-08, 0)

    # A (0, 0) coordinate is in the ocean, not Tanzania: treat as missing.
    for col in ['latitude', 'longitude']:
        X[col] = X[col].replace(0, np.nan)

    # Drop high-cardinality categorical columns. `select_dtypes` is safe
    # even when the frame has no object columns (unlike
    # `describe(include='object')`, which raises).
    hc_cols = [col for col in X.select_dtypes('object').columns
               if X[col].nunique() > max_cardinality]
    X = X.drop(columns=hc_cols)

    # Drop duplicated / constant / uninformative columns
    # (e.g. `quantity_group` repeats `quantity`; `recorded_by` is constant).
    X = X.drop(columns=['quantity_group', 'recorded_by', 'payment_type',
                        'num_private', 'extraction_type_group',
                        'extraction_type_class', 'payment', 'source'])

    return X
Build Model ###Code # Import Libraries for building a model from sklearn.pipeline import make_pipeline from category_encoders import OneHotEncoder from sklearn.impute import SimpleImputer from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import StandardScaler from category_encoders import OrdinalEncoder from sklearn.ensemble import RandomForestClassifier model = make_pipeline( OneHotEncoder(use_cat_names=True), SimpleImputer(), StandardScaler(), LogisticRegression(n_jobs=-1) ) model.fit(X, y) ###Output /usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead. import pandas.util.testing as tm ###Markdown Check Metrics ###Code print('Training Accuracy:', model.score(X_train, y_train)) print('Validation Accuracy:', model.score(X_val, y_val)) ###Output Training Accuracy: 0.7286405723905723 Validation Accuracy: 0.7296296296296296 ###Markdown Tune the Model ###Code # Import GridSearchCV (cross-validation) from sklearn.model_selection import GridSearchCV rfc_model = make_pipeline( OrdinalEncoder(), SimpleImputer(), RandomForestClassifier(max_depth=18, n_jobs=-1, random_state=42) # <-- max_depth, n_estimators ) # The ranges you want to test, as a dictionary params = {'randomforestclassifier__n_estimators': range(50, 201, 50), # 4 options 'randomforestclassifier__max_depth': range(5, 26, 10)} # 3 options # Create your gridsearch gs = GridSearchCV(rfc_model, param_grid=params, n_jobs=-1, verbose=1, cv=5, scoring='accuracy' ) gs.fit(X_train, y_train); # What are the best set of hyperparameters? gs.best_params_ # What if I want to save my best model? 
best_model = gs.best_estimator_ print('Training Accuracy:', best_model.score(X_train, y_train)) print('Validation Accuracy:', best_model.score(X_val, y_val)) best_model.fit(X_train, y_train) print('Validation Accuracy', best_model.score(X_val, y_val)) y_pred = best_model.predict(X_val) test['prediction'] = best_model.predict(test) submission = test.filter(['prediction'], axis=1) submission.columns =['status_group'] submission['id'] = submission.index submission = submission.rename_axis('index1').reset_index() submission.drop('index1',axis=1,inplace=True) submission = submission[['id', 'status_group']] submission submission.to_csv(r'drew-submission-01-rfc.csv', index=False) from google.colab import files files.download('drew-submission-01-rfc.csv') ###Output _____no_output_____
lectures/Week5 answers.ipynb
###Markdown OverviewWe're now switching focus away from the Network Science (for a little bit), beginning to think about _Natural Language Processing_ instead. In other words, today will be all about teaching your computer to "understand" text. This ties in nicely with our work on Reddit, because subbmisions and comments often contain text. We've looked at the network so far - now, let's see if we can include the text. Today is about * Installing the _natural language toolkit_ (NLTK) package and learning the basics of how it works (Chapter 1)* Figuring out how to make NLTK to work with other types of text (Chapter 2). > **_Video Lecture_**. [Intro to Natural Language processing](https://www.youtube.com/watch?v=Ph0EHmFT3n4). Today is all about working with NLTK, so not much lecturing - we will start with a perspective on text analysis by Sune (you will hear him talking about Wikipedia data here and there. Everything he sais applies to Reddit data as well!) ###Code from IPython.display import YouTubeVideo YouTubeVideo("Ph0EHmFT3n4",width=800, height=450) ###Output _____no_output_____ ###Markdown Installing and the basics> _Reading_> The reading for today is Natural Language Processing with Python (NLPP) Chapter 1, Sections 1.1, 1.2, 1.3\. [It's free online](http://www.nltk.org/book/). > *Exercises*: NLPP Chapter 1\.> > * First, install `nltk` if it isn't installed already (there are some tips below that I recommend checking out before doing installing)> * Second, work through chapter 1. The book is set up as a kind of tutorial with lots of examples for you to work through. I recommend you read the text with an open IPython Notebook and type out the examples that you see. ***It becomes much more fun if you to add a few variations and see what happens***. Some of those examples might very well be due as assignments (see below the install tips), so those ones should definitely be in a `notebook`. 
def lexical_diversity(text):
    """Return the ratio of distinct tokens to total tokens in *text*.

    A value near 1.0 means almost every token is unique; lower values
    mean heavier word reuse. Tokens are compared case-sensitively, as
    in the NLTK book examples. Returns 0.0 for an empty text instead
    of raising ZeroDivisionError.
    """
    if not text:
        return 0.0
    return len(set(text)) / len(text)


def percentage(count, total):
    """Express *count* as a percentage of *total* (e.g. 1 of 4 -> 25.0)."""
    return 100 * count / total
, we ' re -- we ' re looking for the Holy Grail . Our quest is to find the Hol oly Grail . Our quest is to find the Holy Grail . KNIGHTS : Yeah . Yes . It is TIM : Yes , I can help you find the Holy Grail . KNIGHTS : Oh , thank you . O n the last resting place of the most Holy Grail . ARTHUR : Where could we find RTHUR : No . LAUNCELOT : We have the Holy Hand Grenade . ARTHUR : Yes , of cou ade . ARTHUR : Yes , of course ! The Holy Hand Grenade of Antioch ! ' Tis one him ! Brother Maynard ! Bring up the Holy Hand Grenade ! MONKS : [ chanting ] ng , ' First shalt thou take out the Holy Pin . Then , shalt thou count to thr , be reached , then lobbest thou thy Holy Hand Grenade of Antioch towards thy iant and pure of spirit may find the Holy Grail in the Castle of uuggggggh '. on peril was no more . The quest for Holy Grail could continue . SCENE 23 : [ your quest ? LAUNCELOT : To seek the Holy Grail . BRIDGEKEEPER : What is your is your quest ? ROBIN : To seek the Holy Grail . BRIDGEKEEPER : What is the c is your quest ? ARTHUR : To seek the Holy Grail . BRIDGEKEEPER : What is the a Thou hast vouchsafed to us the most holy -- [ twong ] [ baaaa ] Jesus Christ ###Markdown > * Also try out the `similar` and `common_context` methods for a few of your own examples. ###Code # text7: Wall Street Journal # text4: Inaugural Address Corpus text7.similar('opportunity') text4.similar('opportunity') text4.common_contexts(['country', 'war']) ###Output the_and the_the the_has the_of a_which the_with the_for the_are of_and the_had the_to the_be this_is ###Markdown > * Create your own version of a dispersion plot ("your own version" means another text and different word). ###Code text7.dispersion_plot(['buy', 'invest', 'work', 'produce', 'save']) ###Output _____no_output_____ ###Markdown > * Explain in your own words what aspect of language _lexical diversity_ describes. - The diversity in the use of words in the text. 
- Calculated as the number of unique words divided by the total number of words in the text > * Create frequency distributions for `text2`, including the cumulative frequency plot for the 75 most common words. ###Code import matplotlib.pylab as plt FDist = FreqDist(text2) fig, ax = plt.subplots(figsize=(15,5)) FDist.plot(75, cumulative=True) plt.show() ###Output _____no_output_____ ###Markdown > * What is a bigram? How does it relate to `collocations`. Explain in your own words. - A bigram is a word pair - A collocation is a word pair that occurs together very often. Where the meaning of the two words are strongly depending on their pairing. So they "resist" substitution. - Collocations are word pairs that occurs together frequently compared to how often the occur separatly > * Work through ex 2-12 in NLPP's section 1.8\. 2. Given an alphabet of 26 letters, there are 26 to the power 10, or 26 ** 10, ten-letter strings we can form. That works out to 141167095653376. How many hundred-letter strings are possible? ###Code 26**100 ###Output _____no_output_____ ###Markdown 3. The Python multiplication operation can be applied to lists. What happens when you type \['Monty', 'Python'\] \* 20, or 3 \* sent1? ###Code # Gets a list repeating 'Monty', 'Python' 20 times repeat_20 = ['Monty', 'Python'] * 20 # Gets a list repeating sent 3 times repeat_3 = 3*sent1 ###Output _____no_output_____ ###Markdown 4. Review 1 on computing with language. How many words are there in text2? How many distinct words are there? ###Code words_in_text2 = len(text2) words_in_text2 distinct_words_in_text2 = len(set(text2)) distinct_words_in_text2 ###Output _____no_output_____ ###Markdown 5. Compare the lexical diversity scores for humor and romance fiction in 1.1. Which genre is more lexically diverse? Humor has the highest lexical diversity of 0.231 compared to 0.121 in fiction: romance.This means humor has the highest proportion of unique words compared to the total number of words in the genre. 
6. Produce a dispersion plot of the four main protagonists in Sense and Sensibility: Elinor, Marianne, Edward, and Willoughby. What can you observe about the different roles played by the males and females in this novel? Can you identify the couples? ###Code text2.dispersion_plot(['Elinor', 'Marianne', 'Edward', 'Willoughby']) ###Output _____no_output_____ ###Markdown Elinor might be the main character and Marianne could be Elinor's close friend.Then Edward and Willoughby could be flirts/boy friends.From the dispersion plot Edward seems to occur often when Elinor does, but Marianne does not. - So Edward might be Elinor's flirt/boy friend - Also Edward is frequent in the end with ElinorWilloughby occurs more often when Marianne does, so he might be her flirt/boy friend 7. Find the collocations in text5. ###Code text5.collocations() ###Output wanna chat; PART JOIN; MODE #14-19teens; JOIN PART; PART PART; cute.-ass MP3; MP3 player; JOIN JOIN; times .. .; ACTION watches; guys wanna; song lasts; last night; ACTION sits; -...)...- S.M.R.; Lime Player; Player 12%; dont know; lez gurls; long time ###Markdown 8. Consider the following Python expression: len(set(text4)). State the purpose of this expression. Describe the two steps involved in performing this computation.* The set() function only takes one of each word, so it finds the different words in text4, but only once even if they occur more often. Then len() finds the length of the set, which gives the number of different words/tokens in text4 11. Define several variables containing lists of words, e.g., phrase1, phrase2, and so on. Join them together in various combinations (using the plus operator) to form whole sentences. 
What is the relationship between len(phrase1 + phrase2) and len(phrase1) + len(phrase2)?* len(phrase1 + phrase2) concatenates phrase1 and phrase2, and gives the total of number of words in phrase1 and phrase2* len(phrase1) + len(phrase2) also gives the total number of words the two phrases, but without concatenating them.it just counts the number of words in each separate sentence and then add them together 12. Consider the following two expressions, which have the same value. Which one will typically be more relevant in NLP? Why?* "Monty Python"[6:12]* ["Monty", "Python"][1]* The first will probably be more relevant in the beginning on raw text data. Here the data is often not processed, so you get everything as strings and need to extract parts of it from the raw data.Once the data is processed the second expression is probably more relevant, as we will often keep the different text representations as lists of words. - So it will depend on the work that you have to do with the text. > * Work through exercise 15, 17, 19, 22, 23, 26, 27, 28 in section 1.8\. 15. Review the discussion of conditionals in 4. Find all words in the Chat Corpus (text5) starting with the letter b. Show them in alphabetical order. ###Code b_words = [w for w in text5 if w.startswith('b')] b_words = set(sorted(b_words)) len(b_words) #b_words ###Output _____no_output_____ ###Markdown 17. Use text9.index() to find the index of the word sunset. You'll need to insert this word as an argument between the parentheses. By a process of trial and error, find the slice for the complete sentence that contains this word. ###Code sunset_index = text9.index('sunset') buffer = 5 text9[sunset_index-buffer-3:sunset_index+buffer*3] ###Output _____no_output_____ ###Markdown 19. What is the difference between the following two lines? Which one will give a larger value? 
Will this be the case for other texts?* sorted(set(w.lower() for w in text1))* sorted(w.lower() for w in set(text1))* First of all the first line produces a sorted set, and the second line produces a sorted list* The first line makes all words in text1 into lower case, then takes the different words in the text and sorts them - This will mean that BIG and big are seen as the same by the set() function, so only one occurence of big at the end* The second line first takes all the different words in the text, then makes them lower case, and then sorts them - This means that BIG and big are seen as different words by the set and then made to lower afterwards. So for instance if there are occurences BIG, BiG, and big, then the final sorted list will have big in it 3 times. 22. Find all the four-letter words in the Chat Corpus (text5). With the help of a frequency distribution (FreqDist), show these words in decreasing order of frequency. ###Code four_letter_words = [w for w in text5 if len(w) == 4] len(four_letter_words) Fdist_four_letter_words = FreqDist(four_letter_words) Fdist_four_letter_words import matplotlib.pylab as plt fig, ax = plt.subplots(figsize=(15,5)) Fdist_four_letter_words.plot(50) plt.show() ###Output _____no_output_____ ###Markdown 23. Review the discussion of looping with conditions in 4. Use a combination of for and if statements to loop over the words of the movie script for Monty Python and the Holy Grail (text6) and print all the uppercase words, one per line. 
###Code for w in text6: if str.isupper(w): print(w) ###Output SCENE KING ARTHUR SOLDIER ARTHUR I SOLDIER ARTHUR I I SOLDIER ARTHUR SOLDIER ARTHUR SOLDIER ARTHUR SOLDIER ARTHUR SOLDIER ARTHUR SOLDIER ARTHUR SOLDIER ARTHUR SOLDIER A ARTHUR SOLDIER A ARTHUR SOLDIER ARTHUR SOLDIER I ARTHUR I SOLDIER SOLDIER SOLDIER I ARTHUR SOLDIER SOLDIER SOLDIER SOLDIER SOLDIER SOLDIER SOLDIER SOLDIER SCENE CART MASTER CUSTOMER CART MASTER DEAD PERSON I CART MASTER CUSTOMER DEAD PERSON I CART MASTER CUSTOMER DEAD PERSON I CART MASTER CUSTOMER DEAD PERSON I CUSTOMER CART MASTER I DEAD PERSON I CUSTOMER CART MASTER I DEAD PERSON I CUSTOMER CART MASTER I CUSTOMER CART MASTER I CUSTOMER CART MASTER DEAD PERSON I I CUSTOMER DEAD PERSON I I CUSTOMER CART MASTER CUSTOMER CART MASTER I CUSTOMER CART MASTER SCENE ARTHUR DENNIS ARTHUR DENNIS I ARTHUR I DENNIS I I ARTHUR I DENNIS ARTHUR I DENNIS ARTHUR I DENNIS I ARTHUR I DENNIS WOMAN ARTHUR I WOMAN ARTHUR WOMAN ARTHUR I WOMAN I I DENNIS A WOMAN DENNIS ARTHUR I WOMAN ARTHUR WOMAN ARTHUR DENNIS I ARTHUR DENNIS ARTHUR I DENNIS ARTHUR DENNIS ARTHUR I WOMAN ARTHUR I WOMAN I ARTHUR WOMAN ARTHUR I I DENNIS ARTHUR DENNIS ARTHUR DENNIS I I I ARTHUR DENNIS ARTHUR DENNIS I ARTHUR DENNIS I SCENE BLACK KNIGHT BLACK KNIGHT GREEN KNIGHT BLACK KNIGHT GREEN KNIGHT BLACK KNIGHT BLACK KNIGHT GREEN KNIGHT GREEN KNIGHT BLACK KNIGHT GREEN KNIGHT BLACK KNIGHT ARTHUR I I BLACK KNIGHT ARTHUR BLACK KNIGHT ARTHUR I I BLACK KNIGHT ARTHUR I BLACK KNIGHT I ARTHUR ARTHUR BLACK KNIGHT ARTHUR BLACK KNIGHT ARTHUR BLACK KNIGHT ARTHUR A BLACK KNIGHT ARTHUR BLACK KNIGHT I ARTHUR BLACK KNIGHT ARTHUR BLACK KNIGHT ARTHUR BLACK KNIGHT ARTHUR BLACK KNIGHT ARTHUR BLACK KNIGHT ARTHUR BLACK KNIGHT I ARTHUR BLACK KNIGHT ARTHUR BLACK KNIGHT ARTHUR I ARTHUR BLACK KNIGHT BLACK KNIGHT I ARTHUR BLACK KNIGHT ARTHUR BLACK KNIGHT I ARTHUR BLACK KNIGHT ARTHUR BLACK KNIGHT BLACK KNIGHT ARTHUR BLACK KNIGHT I I SCENE MONKS CROWD A A A A MONKS CROWD A A A A A A A A A A A A A VILLAGER CROWD BEDEVERE 
VILLAGER CROWD BEDEVERE WITCH I I BEDEVERE WITCH CROWD WITCH BEDEVERE VILLAGER BEDEVERE VILLAGER VILLAGER CROWD BEDEVERE VILLAGER VILLAGER VILLAGER VILLAGER VILLAGERS VILLAGER VILLAGER VILLAGER VILLAGER A VILLAGERS A VILLAGER A VILLAGER RANDOM BEDEVERE VILLAGER BEDEVERE A VILLAGER I VILLAGER VILLAGER CROWD BEDEVERE VILLAGER VILLAGER VILLAGER CROWD BEDEVERE VILLAGER VILLAGER CROWD BEDEVERE VILLAGER VILLAGER VILLAGER BEDEVERE VILLAGER B BEDEVERE CROWD BEDEVERE VILLAGER BEDEVERE VILLAGER RANDOM BEDEVERE VILLAGER VILLAGER VILLAGER CROWD BEDEVERE VILLAGER VILLAGER VILLAGER VILLAGER VILLAGER VILLAGER VILLAGER VILLAGER VILLAGER ARTHUR A CROWD BEDEVERE VILLAGER BEDEVERE VILLAGER A VILLAGER A CROWD A A VILLAGER BEDEVERE CROWD BEDEVERE CROWD A A A WITCH VILLAGER CROWD BEDEVERE ARTHUR I BEDEVERE ARTHUR BEDEVERE I ARTHUR BEDEVERE ARTHUR I NARRATOR SCENE SIR BEDEVERE ARTHUR BEDEVERE SIR LAUNCELOT ARTHUR SIR GALAHAD LAUNCELOT PATSY ARTHUR I KNIGHTS PRISONER KNIGHTS MAN I ARTHUR KNIGHTS SCENE GOD I ARTHUR GOD I I ARTHUR I O GOD ARTHUR GOD ARTHUR O GOD LAUNCELOT A A GALAHAD SCENE ARTHUR FRENCH GUARD ARTHUR FRENCH GUARD ARTHUR FRENCH GUARD I I ARTHUR GALAHAD ARTHUR FRENCH GUARD I ARTHUR FRENCH GUARD ARTHUR FRENCH GUARD I I GALAHAD FRENCH GUARD ARTHUR FRENCH GUARD I GALAHAD ARTHUR FRENCH GUARD I I GALAHAD FRENCH GUARD I ARTHUR I FRENCH GUARD OTHER FRENCH GUARD FRENCH GUARD ARTHUR I KNIGHTS ARTHUR KNIGHTS FRENCH GUARD FRENCH GUARD ARTHUR KNIGHTS FRENCH GUARD FRENCH GUARDS LAUNCELOT I ARTHUR BEDEVERE I FRENCH GUARDS C A ARTHUR BEDEVERE I ARTHUR BEDEVERE U I ARTHUR BEDEVERE ARTHUR KNIGHTS CRASH FRENCH GUARDS SCENE VOICE DIRECTOR HISTORIAN KNIGHT KNIGHT HISTORIAN HISTORIAN S WIFE SCENE NARRATOR MINSTREL O SIR ROBIN DENNIS WOMAN ALL HEADS MINSTREL ROBIN I ALL HEADS MINSTREL ROBIN I ALL HEADS I ROBIN W I I ALL HEADS ROBIN I LEFT HEAD I MIDDLE HEAD I RIGHT HEAD I MIDDLE HEAD I LEFT HEAD I RIGHT HEAD LEFT HEAD ROBIN I LEFT HEAD I RIGHT HEAD MIDDLE HEAD LEFT HEAD RIGHT HEAD MIDDLE HEAD LEFT 
HEAD MIDDLE HEAD LEFT HEAD I MIDDLE HEAD RIGHT HEAD LEFT HEAD MIDDLE HEAD RIGHT HEAD LEFT HEAD ALL HEADS MIDDLE HEAD RIGHT HEAD MINSTREL ROBIN MINSTREL ROBIN I MINSTREL ROBIN MINSTREL ROBIN I MINSTREL ROBIN I MINSTREL ROBIN MINSTREL ROBIN I CARTOON MONKS CARTOON CHARACTER CARTOON MONKS CARTOON CHARACTERS CARTOON MONKS CARTOON CHARACTER VOICE CARTOON CHARACTER SCENE NARRATOR GALAHAD GIRLS ZOOT GALAHAD ZOOT GALAHAD ZOOT GALAHAD ZOOT MIDGET CRAPPER O ZOOT MIDGET CRAPPER ZOOT GALAHAD I I ZOOT GALAHAD ZOOT GALAHAD ZOOT GALAHAD I ZOOT GALAHAD I I ZOOT I GALAHAD ZOOT PIGLET GALAHAD ZOOT GALAHAD B ZOOT WINSTON GALAHAD PIGLET GALAHAD PIGLET GALAHAD I PIGLET GALAHAD I PIGLET GALAHAD I I I GIRLS GALAHAD GIRLS GALAHAD DINGO I GALAHAD I DINGO GALAHAD I I DINGO GALAHAD DINGO I GALAHAD DINGO I LEFT HEAD DENNIS OLD MAN TIM THE ENCHANTER ARMY OF KNIGHTS DINGO I GOD DINGO GIRLS A A DINGO AMAZING STUNNER LOVELY DINGO GIRLS A A DINGO GIRLS GALAHAD I LAUNCELOT GALAHAD LAUNCELOT GALAHAD LAUNCELOT GALAHAD LAUNCELOT DINGO LAUNCELOT GALAHAD LAUNCELOT GALAHAD I LAUNCELOT GIRLS GALAHAD I DINGO GIRLS LAUNCELOT GALAHAD I I DINGO GIRLS LAUNCELOT GALAHAD I DINGO GIRLS DINGO LAUNCELOT GALAHAD I I LAUNCELOT GALAHAD LAUNCELOT GALAHAD I LAUNCELOT GALAHAD LAUNCELOT GALAHAD I LAUNCELOT I NARRATOR I I CROWD NARRATOR I SCENE OLD MAN ARTHUR OLD MAN ARTHUR OLD MAN ARTHUR OLD MAN ARTHUR OLD MAN ARTHUR OLD MAN ARTHUR OLD MAN SCENE HEAD KNIGHT OF NI KNIGHTS OF NI ARTHUR HEAD KNIGHT RANDOM ARTHUR HEAD KNIGHT BEDEVERE HEAD KNIGHT RANDOM ARTHUR HEAD KNIGHT ARTHUR HEAD KNIGHT KNIGHTS OF NI ARTHUR HEAD KNIGHT ARTHUR HEAD KNIGHT ARTHUR A KNIGHTS OF NI ARTHUR PARTY ARTHUR HEAD KNIGHT ARTHUR O HEAD KNIGHT ARTHUR HEAD KNIGHT ARTHUR HEAD KNIGHT CARTOON CHARACTER SUN CARTOON CHARACTER SUN CARTOON CHARACTER SUN CARTOON CHARACTER SCENE NARRATOR FATHER PRINCE HERBERT FATHER HERBERT FATHER HERBERT B I FATHER I I I I I I HERBERT I I FATHER HERBERT I FATHER I HERBERT B I FATHER HERBERT FATHER HERBERT I FATHER HERBERT I I I 
FATHER I GUARD GUARD FATHER I GUARD FATHER GUARD GUARD FATHER GUARD FATHER GUARD FATHER GUARD GUARD FATHER GUARD FATHER GUARD FATHER GUARD FATHER GUARD FATHER GUARD I FATHER N GUARD FATHER GUARD FATHER GUARD GUARD FATHER GUARD FATHER GUARD GUARD FATHER GUARD FATHER GUARD FATHER GUARD GUARD GUARD I FATHER GUARD GUARD FATHER GUARD FATHER I GUARD I HERBERT FATHER GUARD FATHER SCENE LAUNCELOT CONCORDE LAUNCELOT CONCORDE LAUNCELOT I I A A CONCORDE I I LAUNCELOT CONCORDE I I I I I LAUNCELOT I CONCORDE I I LAUNCELOT I I CONCORDE LAUNCELOT CONCORDE I LAUNCELOT CONCORDE I I I SCENE PRINCESS LUCKY GIRLS GUEST SENTRY SENTRY SENTRY LAUNCELOT SENTRY LAUNCELOT PRINCESS LUCKY GIRLS LAUNCELOT GUESTS LAUNCELOT GUARD LAUNCELOT O I I HERBERT LAUNCELOT I I HERBERT LAUNCELOT I HERBERT I I LAUNCELOT I HERBERT FATHER HERBERT I FATHER LAUNCELOT I HERBERT LAUNCELOT FATHER LAUNCELOT FATHER LAUNCELOT I I HERBERT I FATHER LAUNCELOT I FATHER I HERBERT FATHER LAUNCELOT I FATHER LAUNCELOT FATHER LAUNCELOT I I I FATHER HERBERT LAUNCELOT I FATHER LAUNCELOT HERBERT I FATHER LAUNCELOT HERBERT I LAUNCELOT I HERBERT LAUNCELOT I I I FATHER HERBERT SCENE GUESTS FATHER GUEST FATHER LAUNCELOT FATHER LAUNCELOT I I I GUEST GUESTS FATHER LAUNCELOT GUEST GUESTS FATHER GUESTS FATHER I I GUEST FATHER GUEST FATHER BRIDE S FATHER GUEST FATHER I I LAUNCELOT GUEST GUESTS CONCORDE HERBERT I FATHER HERBERT I FATHER HERBERT I FATHER GUESTS FATHER GUESTS FATHER GUESTS FATHER GUESTS FATHER GUESTS CONCORDE GUESTS CONCORDE GUESTS LAUNCELOT GUESTS LAUNCELOT I GUESTS CONCORDE LAUNCELOT GUESTS LAUNCELOT GUESTS LAUNCELOT SCENE ARTHUR OLD CRONE ARTHUR CRONE ARTHUR I CRONE ARTHUR CRONE ARTHUR CRONE BEDEVERE ARTHUR BEDEVERE ARTHUR BEDEVERE ARTHUR BEDEVERE ARTHUR BEDEVERE ARTHUR ARTHUR BEDEVERE CRONE BEDEVERE ARTHUR CRONE BEDEVERE ARTHUR BEDEVERE ARTHUR BEDEVERE ROGER THE SHRUBBER ARTHUR ROGER ARTHUR ROGER I I BEDEVERE ARTHUR SCENE ARTHUR O HEAD KNIGHT I ARTHUR HEAD KNIGHT KNIGHTS OF NI HEAD KNIGHT RANDOM HEAD KNIGHT ARTHUR O 
HEAD KNIGHT ARTHUR RANDOM HEAD KNIGHT KNIGHTS OF NI A A A HEAD KNIGHT ARTHUR HEAD KNIGHT ARTHUR KNIGHTS OF NI HEAD KNIGHT ARTHUR HEAD KNIGHT I ARTHUR KNIGHTS OF NI HEAD KNIGHT ARTHUR KNIGHTS OF NI HEAD KNIGHT KNIGHTS OF NI BEDEVERE MINSTREL ARTHUR ROBIN HEAD KNIGHT ARTHUR MINSTREL ROBIN HEAD KNIGHT KNIGHTS OF NI ROBIN I KNIGHTS OF NI ROBIN ARTHUR KNIGHTS OF NI HEAD KNIGHT ARTHUR KNIGHTS OF NI HEAD KNIGHT ARTHUR HEAD KNIGHT I I I KNIGHTS OF NI NARRATOR KNIGHTS NARRATOR MINSTREL NARRATOR KNIGHTS NARRATOR A CARTOON CHARACTER NARRATOR CARTOON CHARACTER NARRATOR CARTOON CHARACTER NARRATOR CARTOON CHARACTER NARRATOR CARTOON CHARACTER NARRATOR SCENE KNIGHTS ARTHUR TIM THE ENCHANTER I ARTHUR TIM ARTHUR TIM ARTHUR TIM I ARTHUR O TIM ROBIN ARTHUR KNIGHTS ARTHUR BEDEVERE GALAHAD ROBIN BEDEVERE ROBIN BEDEVERE ARTHUR GALAHAD ARTHUR I I TIM A ARTHUR A TIM A ARTHUR I ROBIN Y ARTHUR GALAHAD KNIGHTS TIM ROBIN ARTHUR ROBIN GALAHAD ARTHUR ROBIN KNIGHTS ARTHUR TIM I KNIGHTS TIM ARTHUR O TIM ARTHUR SCENE GALAHAD ARTHUR TIM ARTHUR GALAHAD ARTHUR W TIM ARTHUR TIM ARTHUR TIM ARTHUR TIM ARTHUR TIM ARTHUR TIM ARTHUR TIM ROBIN I I TIM GALAHAD TIM GALAHAD ROBIN TIM I ROBIN TIM ARTHUR BORS TIM BORS ARTHUR TIM I ROBIN I TIM I I ARTHUR TIM ARTHUR TIM KNIGHTS KNIGHTS ARTHUR KNIGHTS TIM ARTHUR LAUNCELOT GALAHAD ARTHUR GALAHAD ARTHUR ROBIN ARTHUR GALAHAD ARTHUR GALAHAD LAUNCELOT ARTHUR LAUNCELOT ARTHUR MONKS ARTHUR LAUNCELOT I ARTHUR BROTHER MAYNARD SECOND BROTHER O MAYNARD SECOND BROTHER MAYNARD KNIGHTS ARTHUR GALAHAD ARTHUR SCENE ARTHUR LAUNCELOT GALAHAD ARTHUR MAYNARD GALAHAD LAUNCELOT ARTHUR MAYNARD ARTHUR MAYNARD BEDEVERE MAYNARD LAUNCELOT MAYNARD ARTHUR MAYNARD GALAHAD ARTHUR MAYNARD LAUNCELOT ARTHUR BEDEVERE GALAHAD BEDEVERE I LAUNCELOT ARTHUR LAUNCELOT KNIGHTS BEDEVERE LAUNCELOT BEDEVERE N LAUNCELOT BEDEVERE I ARTHUR GALAHAD MAYNARD BROTHER MAYNARD BEDEVERE ARTHUR KNIGHTS BEDEVERE KNIGHTS NARRATOR ANIMATOR NARRATOR SCENE GALAHAD ARTHUR ROBIN ARTHUR BEDEVERE ARTHUR GALAHAD ARTHUR GALAHAD 
def vocab_size(text):
    """Return the number of distinct, case-insensitive words in *text*."""
    lowered = {word.lower() for word in text}
    return len(lowered)
###Code def percent(word, text): word_occurences = sum([w == word for w in text]) return 100 * word_occurences / len(text) percent('rabbit', text6) ###Output _____no_output_____ ###Markdown Working with NLTK and other types of textChapter 2 in NLPP1e is all about getting access to nicely curated texts that you can find built into NLTK. > > Reading: NLPP Chapter 2.1 - 2.4\.> > *Exercises*: NLPP Chapter 2\.> > * Solve exercise 4, 8, 11, 15, 16, 17, 18 in NLPP1e, section 2.8\. As always, I recommend you write up your solutions nicely in a `notebook`. ###Code import nltk nltk.corpus.gutenberg.words('austen-emma.txt') from nltk.corpus import gutenberg gutenberg.fileids() ###Output _____no_output_____ ###Markdown 4. Read in the texts of the State of the Union addresses, using the state_union corpus reader. Count occurrences of men, women, and people in each document. What has happened to the usage of these words over time? ###Code from nltk.corpus import state_union print('Women Count | Men Count | People Count | Text') for fileid in state_union.fileids(): women_count = len([w for w in state_union.words(fileid) if w.lower() == 'women']) men_count = len([w for w in state_union.words(fileid) if w.lower() == 'men']) people_count = len([w for w in state_union.words(fileid) if w.lower() == 'people']) print(f'\t{women_count} \t\t {men_count} \t\t {people_count} \t\t {fileid}') ###Output Women Count | Men Count | People Count | Text 2 2 10 1945-Truman.txt 7 12 49 1946-Truman.txt 2 7 12 1947-Truman.txt 1 5 22 1948-Truman.txt 1 2 15 1949-Truman.txt 2 6 15 1950-Truman.txt 2 8 10 1951-Truman.txt 0 3 17 1953-Eisenhower.txt 0 2 15 1954-Eisenhower.txt 0 4 26 1955-Eisenhower.txt 2 2 30 1956-Eisenhower.txt 2 5 11 1957-Eisenhower.txt 1 2 19 1958-Eisenhower.txt 1 4 11 1959-Eisenhower.txt 0 2 10 1960-Eisenhower.txt 0 6 10 1961-Kennedy.txt 2 6 10 1962-Kennedy.txt 0 0 3 1963-Johnson.txt 5 8 12 1963-Kennedy.txt 1 3 3 1964-Johnson.txt 0 7 16 1965-Johnson-1.txt 3 12 14 1965-Johnson-2.txt 1 12 35 
1966-Johnson.txt 1 11 25 1967-Johnson.txt 0 4 17 1968-Johnson.txt 2 5 6 1969-Johnson.txt 0 2 23 1970-Nixon.txt 0 1 32 1971-Nixon.txt 0 1 7 1972-Nixon.txt 0 1 9 1973-Nixon.txt 0 0 20 1974-Nixon.txt 0 0 14 1975-Ford.txt 1 3 18 1976-Ford.txt 1 2 19 1977-Ford.txt 1 0 26 1978-Carter.txt 1 0 15 1979-Carter.txt 2 1 12 1980-Carter.txt 1 1 11 1981-Reagan.txt 2 1 17 1982-Reagan.txt 7 3 19 1983-Reagan.txt 5 3 27 1984-Reagan.txt 1 1 12 1985-Reagan.txt 2 2 14 1986-Reagan.txt 0 1 24 1987-Reagan.txt 0 1 17 1988-Reagan.txt 3 2 13 1989-Bush.txt 2 3 9 1990-Bush.txt 2 2 14 1991-Bush-1.txt 7 7 13 1991-Bush-2.txt 4 4 27 1992-Bush.txt 2 1 45 1993-Clinton.txt 1 1 66 1994-Clinton.txt 3 1 73 1995-Clinton.txt 3 2 43 1996-Clinton.txt 2 1 31 1997-Clinton.txt 2 2 22 1998-Clinton.txt 3 2 22 1999-Clinton.txt 7 5 41 2000-Clinton.txt 3 3 15 2001-GWBush-1.txt 3 1 12 2001-GWBush-2.txt 6 3 14 2002-GWBush.txt 4 6 33 2003-GWBush.txt 8 7 21 2004-GWBush.txt 11 8 18 2005-GWBush.txt 7 7 22 2006-GWBush.txt ###Markdown women gets more mention through the year until they get to around the same as men.people is mentioned quite differently depending on the sitting president. 8. Define a conditional frequency distribution over the Names corpus that allows you to see which initial letters are more frequent for males vs. females ###Code import nltk from nltk.corpus import names cfd = nltk.ConditionalFreqDist( (fileid, w[0]) for fileid in names.fileids() for w in names.words(fileid)) import matplotlib.pylab as plt fig, ax = plt.subplots(figsize=(15,5)) cfd.plot() plt.show() ###Output _____no_output_____ ###Markdown 11. Investigate the table of modal distributions and look for other patterns. Try to explain them in terms of your own impressionistic understanding of the different genres. Can you find other closed classes of words that exhibit significant differences across different genres? 15. Write a program to find all words that occur at least three times in the Brown Corpus. 
###Code import nltk from nltk.corpus import brown cfd = nltk.FreqDist(brown.words()) plus_three_ocurrences = [w for w, count in cfd.items() if count >= 3] len(plus_three_ocurrences) ###Output _____no_output_____ ###Markdown 16. Write a program to generate a table of lexical diversity scores (i.e. token/type ratios), as we saw in 1.1. Include the full set of Brown Corpus genres (nltk.corpus.brown.categories()). Which genre has the lowest diversity (greatest number of tokens per type)? Is this what you would have expected? ###Code import nltk from nltk.corpus import brown lexical_diversity_table = { genre: lexical_diversity(brown.words(categories=genre)) for genre in brown.categories() } lexical_diversity_table ###Output _____no_output_____ ###Markdown 17. Write a function that finds the 50 most frequently occurring words of a text that are not stopwords. ###Code import nltk from nltk.corpus import stopwords stp_words = stopwords.words('english')+['.', ',', ';', '-', "'", '"', '--', '?', '!', '."', '?"', '(', ')', ',"', '!"'] def top_non_stopwords(text, top=50): cfd = nltk.FreqDist( w for w in text if w.lower() not in stp_words) return [w for w, _ in cfd.most_common(top)] top_non_stopwords(text1) ###Output _____no_output_____ ###Markdown 18. Write a program to print the 50 most frequent bigrams (pairs of adjacent words) of a text, omitting bigrams that contain stopwords. ###Code import nltk from nltk.corpus import stopwords stp_words = stopwords.words('english')+['.', ',', ';', '-', "'", '"', '--', '?', '!', '."', '?"', '(', ')', ',"', '!"'] def top_non_stopword_bigrams(text, top=50): cfd = nltk.FreqDist( b for b in nltk.bigrams(text) if b[0].lower() not in stp_words and b[1].lower() not in stp_words) return [b for b, _ in cfd.most_common(top)] top_non_stopword_bigrams(text1) ###Output _____no_output_____ 
Regression/Decision_Tree_Regressor.ipynb
###Markdown Decision Tree Regression Importing the libraries ###Code import numpy as np import matplotlib.pyplot as plt import pandas as pd ###Output _____no_output_____ ###Markdown Importing the dataset ###Code dataset = pd.read_csv('Position_Salaries.csv') X = dataset.iloc[:, 1:-1].values y = dataset.iloc[:, -1].values ###Output _____no_output_____ ###Markdown Training the Decision Tree Regression model on the whole dataset ###Code from sklearn.tree import DecisionTreeRegressor regressor = DecisionTreeRegressor(random_state = 0) regressor.fit(X, y) ###Output _____no_output_____ ###Markdown Predicting a new result ###Code regressor.predict([[6.5]]) ###Output _____no_output_____ ###Markdown Visualising the Decision Tree Regression results (higher resolution) ###Code X_grid = np.arange(min(X), max(X), 0.1) X_grid = X_grid.reshape((len(X_grid), 1)) plt.scatter(X, y, color = 'red') plt.plot(X_grid, regressor.predict(X_grid), color = 'blue') plt.title('Truth or Bluff (Decision Tree Regression)') plt.xlabel('Position level') plt.ylabel('Salary') plt.show() ###Output _____no_output_____ ###Markdown Visualising the Decision Tree Regression results (Lower resolution) ###Code plt.scatter(X, y, color = 'red') plt.plot(X, regressor.predict(X), color = 'blue') plt.title('Truth or Bluff (Decision Tree Regression)') plt.xlabel('Position level') plt.ylabel('Salary') plt.show() ###Output _____no_output_____
samples/04_gis_analysts_data_scientists/calculating_nXn_od_cost_matrix.ipynb
###Markdown Calculating Origin Destinations nXn Matrix given set of origins and destinations Table of ContentsOrigin Destinations nXn Matrix given set of origins and destinationsCreate origins layer:Create destinations layer:Convert to matrix formatConclusion The [Origin Destination(OD) Cost Matrix service](http://desktop.arcgis.com/en/arcmap/latest/extensions/network-analyst/od-cost-matrix.htm) helps you to create an OD cost matrix for multiple `origins` to multiple `destinations`. An OD cost matrix is a table that contains cost, such as travel time or travel distance, from each origin to each destination. Additionally, it ranks the destinations in ascending order based on the minimum cost required to travel. When generating an OD cost matrix, you can optionally specify the maximum number of destinations to find for each origin and the maximum time or distance to travel when searching for destinations.By default, the matrix is generated with columns - origin id, destination id, destination rank, total time and total distance. In this sample notebook , we will use this tool to get OD matrix if given a set of origin and destination points, either as a csv with latitude and longitude or csv file with list of addresses. In later part of this sample, we will format the table to get n by n matrix.This is useful when you want to solve other transportation problems with open source tools or heuristics. When it comes to real world TSP(Travelling Salesman Problem) or VRP(Vehicle Routing Problem) or other tranportation problems, data about travel time from every point to every other point can give you more realistic results than with euclidean distance. 
**Note** :If you run the tutorial using ArcGIS Online, 0.003 [credit](https://www.esri.com/en-us/arcgis/products/arcgis-online/pricing/credits) will be consumed as there are 6 origin-destination pairs.As a first step, let's import required libraries and establish a connection to your organization which could be an ArcGIS Online organization or an ArcGIS Enterprise. If you dont have an ArcGIS account, [get ArcGIS Trial](https://www.esri.com/en-us/arcgis/trial). ###Code import arcgis from arcgis.gis import GIS import pandas as pd import datetime import getpass from IPython.display import HTML from arcgis import geocoding from arcgis.features import Feature, FeatureSet from arcgis.features import GeoAccessor, GeoSeriesAccessor portal_url = 'https://wwww.arcgis.com' #connect to your GIS user_name = '<user_name>' password = '<password>' my_gis = GIS(portal_url, user_name, password) ###Output _____no_output_____ ###Markdown We will see how to create layer for origins and destinations when we have latitude and longitude and when we have addresses to geocode for converting to layer respectively. Create origins layer: We have latitude and longitude information for origins, with the following code snippet, we can create a layer from the information. We will reverse geocode the latitude longitude information to find the locations.**Note**: Geocoding the addresses will consume [credits](https://www.esri.com/en-us/arcgis/products/arcgis-online/pricing/credits). 
###Code origin_coords = ['-117.187807, 33.939479', '-117.117401, 34.029346'] origin_features = [] for origin in origin_coords: reverse_geocode = geocoding.reverse_geocode({"x": origin.split(',')[0], "y": origin.split(',')[1]}) origin_feature = Feature(geometry=reverse_geocode['location'], attributes=reverse_geocode['address']) origin_features.append(origin_feature) origin_fset = FeatureSet(origin_features, geometry_type='esriGeometryPoint', spatial_reference={'latestWkid': 4326}) origin_fset ###Output _____no_output_____ ###Markdown Create destinations layer: We have address information for the destinations in a csv file, with the following code snippet, we can geocode the addresses to create a destination layer. You could [batch geocode](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.geocoding.htmlbatch-geocode) the addresses if you have a list of addresses. **Note**: Geocoding the addresses will consume credits. ###Code # Read csv files from data: destinations_address = r"data/destinations_address.csv" destinations_df = pd.read_csv(destinations_address) destinations_sdf = pd.DataFrame.spatial.from_df(destinations_df, "Address") destinations_sdf.head() destinations_fset = destinations_sdf.spatial.to_featureset() destinations_fset ###Output _____no_output_____ ###Markdown With these inputs, solve the problem with Origin Destination matrix solver. Look up [the doc](https://developers.arcgis.com/rest/network/api-reference/origin-destination-cost-matrix-service.htm) to understand how this tool works and its parameters. Remember, `0.0005` credits per input origin and destination pair will be charged. For example, if there are `100` origins and `200` destinations, the cost will be `10` credits. If you specify a cutoff or limit the number of destinations, for instance, to find only `5` closest destinations within `10` minutes of every origin, the cost will still be `10` credits, as the credits depend on the number of input origin destination pairs. 
`TargetDestinationCount`- The maximum number of destinations that must be found for the origin. If a value is not specified, the value from the Number of Destinations to Find parameter is used. `Cutoff`- Specify the travel time or travel distance value at which to stop searching for destinations from the origin. Any destination beyond the cutoff value will not be considered. The value needs to be in the units specified by the Time Units parameter if the impedance attribute in your travel mode is time based or in the units specified by the Distance Units parameter if the impedance attribute in your travel mode is distance based. If a value is not specified, the tool will not enforce any travel time or travel distance limit when searching for destinations.Specify `origin_destination_line_shape` to see the output in map. Even though the lines are straight for performance reasons, they always store the travel time and travel distance along the street network, not straight-line distance. ###Code %%time # solve OD cost matrix tool for the origns and destinations from arcgis.network.analysis import generate_origin_destination_cost_matrix results = generate_origin_destination_cost_matrix(origins= origin_fset, #origins_fc_latlong, destinations= destinations_fset, #destinations_fs_address, cutoff=200, origin_destination_line_shape='Straight Line') print('Analysis succeeded? {}'.format(results.solve_succeeded)) ###Output Analysis succeeded? True CPU times: user 379 ms, sys: 27.1 ms, total: 406 ms Wall time: 24.7 s ###Markdown Let's see the output lines table. ###Code od_df = results.output_origin_destination_lines.sdf od_df ###Output _____no_output_____ ###Markdown Convert to matrix formatWe need to change the format to get a matrix with rows as origins and columns as destinations, with impedance value as travel time or travel distance. We will use the `pivot_table` feature of Pandas to accomplish that. 
###Code # filter only the required columns od_df2 = od_df[['DestinationOID','OriginOID','Total_Distance','Total_Time']] # user pivot_table od_pivot = od_df2.pivot_table(index='OriginOID', columns='DestinationOID') od_pivot ###Output _____no_output_____ ###Markdown Write the pivot table to disk ###Code od_pivot.to_csv('data/OD_Matrix.csv') ###Output _____no_output_____ ###Markdown This is how we can get OD cost matrix when we have csv files with origin and destinations location information. We could read this matrix and provide this as input to a heuristics or an open-source algorithm. ###Code od_map = my_gis.map('Loma Linda, CA') od_map od_map.draw(results.output_origin_destination_lines) od_map.draw(destinations_fset, symbol={"type": "esriSMS","style": "esriSMSSquare","color": [255,115,0,255], "size": 10}) od_map.draw(origin_fset, symbol={"type": "esriSMS","style": "esriSMSCircle","color": [76,115,0,255],"size": 8}) ###Output _____no_output_____ ###Markdown Calculating Origin Destinations nXn Matrix given set of origins and destinations Table of ContentsOrigin Destinations nXn Matrix given set of origins and destinationsCreate origins layer:Create destinations layer:Convert to matrix formatConclusion The [Origin Destination(OD) Cost Matrix service](http://desktop.arcgis.com/en/arcmap/latest/extensions/network-analyst/od-cost-matrix.htm) helps you to create an OD cost matrix for multiple `origins` to multiple `destinations`. An OD cost matrix is a table that contains cost, such as travel time or travel distance, from each origin to each destination. Additionally, it ranks the destinations in ascending order based on the minimum cost required to travel. 
When generating an OD cost matrix, you can optionally specify the maximum number of destinations to find for each origin and the maximum time or distance to travel when searching for destinations.By default, the matrix is generated with columns - origin id, destination id, destination rank, total time and total distance. In this sample notebook , we will use this tool to get OD matrix if given a set of origin and destination points, either as a csv with latitude and longitude or csv file with list of addresses. In later part of this sample, we will format the table to get n by n matrix.This is useful when you want to solve other transportation problems with open source tools or heuristics. When it comes to real world TSP(Travelling Salesman Problem) or VRP(Vehicle Routing Problem) or other tranportation problems, data about travel time from every point to every other point can give you more realistic results than with euclidean distance. **Note** :If you run the tutorial using ArcGIS Online, 0.003 [credit](https://www.esri.com/en-us/arcgis/products/arcgis-online/pricing/credits) will be consumed as there are 6 origin-destination pairs.As a first step, let's import required libraries and establish a connection to your organization which could be an ArcGIS Online organization or an ArcGIS Enterprise. If you dont have an ArcGIS account, [get ArcGIS Trial](https://www.esri.com/en-us/arcgis/trial). 
###Code import arcgis from arcgis.gis import GIS import pandas as pd import datetime import getpass from IPython.display import HTML from arcgis import geocoding from arcgis.features import Feature, FeatureSet from arcgis.features import GeoAccessor, GeoSeriesAccessor portal_url = 'https://wwww.arcgis.com' #connect to your GIS user_name = '<user_name>' password = '<password>' my_gis = GIS(portal_url, user_name, password) ###Output _____no_output_____ ###Markdown We will see how to create layer for origins and destinations when we have latitude and longitude and when we have addresses to geocode for converting to layer respectively. Create origins layer: We have latitude and longitude information for origins, with the following code snippet, we can create a layer from the information. We will reverse geocode the latitude longitude information to find the locations.**Note**: Geocoding the addresses will consume [credits](https://www.esri.com/en-us/arcgis/products/arcgis-online/pricing/credits). ###Code origin_coords = ['-117.187807, 33.939479', '-117.117401, 34.029346'] origin_features = [] for origin in origin_coords: reverse_geocode = geocoding.reverse_geocode({"x": origin.split(',')[0], "y": origin.split(',')[1]}) origin_feature = Feature(geometry=reverse_geocode['location'], attributes=reverse_geocode['address']) origin_features.append(origin_feature) origin_fset = FeatureSet(origin_features, geometry_type='esriGeometryPoint', spatial_reference={'latestWkid': 4326}) origin_fset ###Output _____no_output_____ ###Markdown Create destinations layer: We have address information for the destinations in a csv file, with the following code snippet, we can geocode the addresses to create a destination layer. You could [batch geocode](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.geocoding.htmlbatch-geocode) the addresses if you have a list of addresses. **Note**: Geocoding the addresses will consume credits. 
###Code # Read csv files from data: destinations_address = r"data/destinations_address.csv" destinations_df = pd.read_csv(destinations_address) destinations_sdf = pd.DataFrame.spatial.from_df(destinations_df, "Address") destinations_sdf.head() destinations_fset = destinations_sdf.spatial.to_featureset() destinations_fset ###Output _____no_output_____ ###Markdown With these inputs, solve the problem with Origin Destination matrix solver. Look up [the doc](https://developers.arcgis.com/rest/network/api-reference/origin-destination-cost-matrix-service.htm) to understand how this tool works and its parameters. Remember, `0.0005` credits per input origin and destination pair will be charged. For example, if there are `100` origins and `200` destinations, the cost will be `10` credits. If you specify a cutoff or limit the number of destinations, for instance, to find only `5` closest destinations within `10` minutes of every origin, the cost will still be `10` credits, as the credits depend on the number of input origin destination pairs. `TargetDestinationCount`- The maximum number of destinations that must be found for the origin. If a value is not specified, the value from the Number of Destinations to Find parameter is used. `Cutoff`- Specify the travel time or travel distance value at which to stop searching for destinations from the origin. Any destination beyond the cutoff value will not be considered. The value needs to be in the units specified by the Time Units parameter if the impedance attribute in your travel mode is time based or in the units specified by the Distance Units parameter if the impedance attribute in your travel mode is distance based. If a value is not specified, the tool will not enforce any travel time or travel distance limit when searching for destinations.Specify `origin_destination_line_shape` to see the output in map. 
Even though the lines are straight for performance reasons, they always store the travel time and travel distance along the street network, not straight-line distance. ###Code %%time # solve OD cost matrix tool for the origns and destinations from arcgis.network.analysis import generate_origin_destination_cost_matrix results = generate_origin_destination_cost_matrix(origins= origin_fset, #origins_fc_latlong, destinations= destinations_fset, #destinations_fs_address, cutoff=200, origin_destination_line_shape='Straight Line') print('Analysis succeeded? {}'.format(results.solve_succeeded)) ###Output Analysis succeeded? True CPU times: user 379 ms, sys: 27.1 ms, total: 406 ms Wall time: 24.7 s ###Markdown Let's see the output lines table. ###Code od_df = results.output_origin_destination_lines.sdf od_df ###Output _____no_output_____ ###Markdown Convert to matrix formatWe need to change the format to get a matrix with rows as origins and columns as destinations, with impedance value as travel time or travel distance. We will use the `pivot_table` feature of Pandas to accomplish that. ###Code # filter only the required columns od_df2 = od_df[['DestinationOID','OriginOID','Total_Distance','Total_Time']] # user pivot_table od_pivot = od_df2.pivot_table(index='OriginOID', columns='DestinationOID') od_pivot ###Output _____no_output_____ ###Markdown Write the pivot table to disk ###Code od_pivot.to_csv('data/OD_Matrix.csv') ###Output _____no_output_____ ###Markdown This is how we can get OD cost matrix when we have csv files with origin and destinations location information. We could read this matrix and provide this as input to a heuristics or an open-source algorithm. 
###Code od_map = my_gis.map('Loma Linda, CA') od_map od_map.draw(results.output_origin_destination_lines) od_map.draw(destinations_fset, symbol={"type": "esriSMS","style": "esriSMSSquare","color": [255,115,0,255], "size": 10}) od_map.draw(origin_fset, symbol={"type": "esriSMS","style": "esriSMSCircle","color": [76,115,0,255],"size": 8}) ###Output _____no_output_____ ###Markdown Calculating Origin Destinations nXn Matrix given set of origins and destinations Table of ContentsOrigin Destinations nXn Matrix given set of origins and destinationsCreate origins layer:Create destinations layer:Convert to matrix formatConclusion The [Origin Destination(OD) Cost Matrix service](http://desktop.arcgis.com/en/arcmap/latest/extensions/network-analyst/od-cost-matrix.htm) helps you to create an OD cost matrix for multiple `origins` to multiple `destinations`. An OD cost matrix is a table that contains cost, such as travel time or travel distance, from each origin to each destination. Additionally, it ranks the destinations in ascending order based on the minimum cost required to travel. When generating an OD cost matrix, you can optionally specify the maximum number of destinations to find for each origin and the maximum time or distance to travel when searching for destinations.By default, the matrix is generated with columns - origin id, destination id, destination rank, total time and total distance. In this sample notebook , we will use this tool to get OD matrix if given a set of origin and destination points, either as a csv with latitude and longitude or csv file with list of addresses. In later part of this sample, we will format the table to get n by n matrix.This is useful when you want to solve other transportation problems with open source tools or heuristics. 
When it comes to real world TSP(Travelling Salesman Problem) or VRP(Vehicle Routing Problem) or other tranportation problems, data about travel time from every point to every other point can give you more realistic results than with euclidean distance. **Note** :If you run the tutorial using ArcGIS Online, 0.003 [credit](https://www.esri.com/en-us/arcgis/products/arcgis-online/pricing/credits) will be consumed as there are 6 origin-destination pairs.As a first step, let's import required libraries and establish a connection to your organization which could be an ArcGIS Online organization or an ArcGIS Enterprise. If you dont have an ArcGIS account, [get ArcGIS Trial](https://www.esri.com/en-us/arcgis/trial). ###Code import arcgis from arcgis.gis import GIS import pandas as pd import datetime import getpass from IPython.display import HTML from arcgis import geocoding from arcgis.features import Feature, FeatureSet from arcgis.features import GeoAccessor, GeoSeriesAccessor my_gis = GIS('home') ###Output _____no_output_____ ###Markdown We will see how to create layer for origins and destinations when we have latitude and longitude and when we have addresses to geocode for converting to layer respectively. Create origins layer: We have latitude and longitude information for origins, with the following code snippet, we can create a layer from the information. We will reverse geocode the latitude longitude information to find the locations.**Note**: Geocoding the addresses will consume [credits](https://www.esri.com/en-us/arcgis/products/arcgis-online/pricing/credits). 
###Code origin_coords = ['-117.187807, 33.939479', '-117.117401, 34.029346'] origin_features = [] for origin in origin_coords: reverse_geocode = geocoding.reverse_geocode({"x": origin.split(',')[0], "y": origin.split(',')[1]}) origin_feature = Feature(geometry=reverse_geocode['location'], attributes=reverse_geocode['address']) origin_features.append(origin_feature) origin_fset = FeatureSet(origin_features, geometry_type='esriGeometryPoint', spatial_reference={'latestWkid': 4326}) origin_fset ###Output _____no_output_____ ###Markdown Create destinations layer: We have address information for the destinations in a csv file, with the following code snippet, we can geocode the addresses to create a destination layer. You could [batch geocode](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.geocoding.htmlbatch-geocode) the addresses if you have a list of addresses. **Note**: Geocoding the addresses will consume credits. ###Code # Read csv files from data: destinations_address = r"data/destinations_address.csv" destinations_df = pd.read_csv(destinations_address) destinations_sdf = pd.DataFrame.spatial.from_df(destinations_df, "Address") destinations_sdf.head() destinations_fset = destinations_sdf.spatial.to_featureset() destinations_fset ###Output _____no_output_____ ###Markdown With these inputs, solve the problem with Origin Destintion matrix solver. Look up [the doc](https://developers.arcgis.com/rest/network/api-reference/origin-destination-cost-matrix-service.htm) to understand how this tool works and its parameters. Remember, `0.0005` credits per input origin and destination pair will be charged. For example, if there are `100` origins and `200` destinations, the cost will be `10` credits. If you specify a cutoff or limit the number of destinations, for instance, to find only `5` closest destinations within `10` minutes of every origin, the cost will still be `10` credits, as the credits depend on the number of input origin destination pairs. 
`TargetDestinationCount`- The maximum number of destinations that must be found for the origin. If a value is not specified, the value from the Number of Destinations to Find parameter is used. `Cutoff`- Specify the travel time or travel distance value at which to stop searching for destinations from the origin. Any destination beyond the cutoff value will not be considered. The value needs to be in the units specified by the Time Units parameter if the impedance attribute in your travel mode is time based or in the units specified by the Distance Units parameter if the impedance attribute in your travel mode is distance based. If a value is not specified, the tool will not enforce any travel time or travel distance limit when searching for destinations.Specify `origin_destination_line_shape` to see the output in map. Even though the lines are straight for performance reasons, they always store the travel time and travel distance along the street network, not straight-line distance. ###Code %%time # solve OD cost matrix tool for the origns and destinations from arcgis.network.analysis import generate_origin_destination_cost_matrix results = generate_origin_destination_cost_matrix(origins= origin_fset, #origins_fc_latlong, destinations= destinations_fset, #destinations_fs_address, cutoff=200, origin_destination_line_shape='Straight Line') print('Analysis succeeded? {}'.format(results.solve_succeeded)) ###Output Analysis succeeded? True CPU times: user 379 ms, sys: 27.1 ms, total: 406 ms Wall time: 24.7 s ###Markdown Let's see the output lines table. ###Code od_df = results.output_origin_destination_lines.sdf od_df ###Output _____no_output_____ ###Markdown Convert to matrix formatWe need to change the format to get a matrix with rows as origins and columns as destinations, with impedance value as travel time or travel distance. We will use the `pivot_table` feature of Pandas to accomplish that. 
###Code # filter only the required columns od_df2 = od_df[['DestinationOID','OriginOID','Total_Distance','Total_Time']] # user pivot_table od_pivot = od_df2.pivot_table(index='OriginOID', columns='DestinationOID') od_pivot ###Output _____no_output_____ ###Markdown Write the pivot table to disk ###Code od_pivot.to_csv('data/OD_Matrix.csv') ###Output _____no_output_____ ###Markdown This is how we can get OD cost matrix when we have csv files with origin and destinations location information. We could read this matrix and provide this as input to a heuristics or an open-source algorithm. ###Code od_map = my_gis.map('Loma Linda, CA') od_map od_map.draw(results.output_origin_destination_lines) od_map.draw(destinations_fset, symbol={"type": "esriSMS","style": "esriSMSSquare","color": [255,115,0,255], "size": 10}) od_map.draw(origin_fset, symbol={"type": "esriSMS","style": "esriSMSCircle","color": [76,115,0,255],"size": 8}) ###Output _____no_output_____ ###Markdown Calculating Origin Destinations nXn Matrix given set of origins and destinations Table of ContentsOrigin Destinations nXn Matrix given set of origins and destinationsCreate origins layer:Create destinations layer:Convert to matrix formatConclusion The [Origin Destination(OD) Cost Matrix service](http://desktop.arcgis.com/en/arcmap/latest/extensions/network-analyst/od-cost-matrix.htm) helps you to create an OD cost matrix for multiple `origins` to multiple `destinations`. An OD cost matrix is a table that contains cost, such as travel time or travel distance, from each origin to each destination. Additionally, it ranks the destinations in ascending order based on the minimum cost required to travel. 
When generating an OD cost matrix, you can optionally specify the maximum number of destinations to find for each origin and the maximum time or distance to travel when searching for destinations.By default, the matrix is generated with columns - origin id, destination id, destination rank, total time and total distance. In this sample notebook , we will use this tool to get OD matrix if given a set of origin and destination points, either as a csv with latitude and longitude or csv file with list of addresses. In later part of this sample, we will format the table to get n by n matrix.This is useful when you want to solve other transportation problems with open source tools or heuristics. When it comes to real world TSP(Travelling Salesman Problem) or VRP(Vehicle Routing Problem) or other tranportation problems, data about travel time from every point to every other point can give you more realistic results than with euclidean distance. **Note** :If you run the tutorial using ArcGIS Online, 0.003 [credit](https://www.esri.com/en-us/arcgis/products/arcgis-online/pricing/credits) will be consumed as there are 6 origin-destination pairs.As a first step, let's import required libraries and establish a connection to your organization which could be an ArcGIS Online organization or an ArcGIS Enterprise. If you dont have an ArcGIS account, [get ArcGIS Trial](https://www.esri.com/en-us/arcgis/trial). 
###Code import arcgis from arcgis.gis import GIS import pandas as pd import datetime import getpass from IPython.display import HTML from arcgis import geocoding from arcgis.features import Feature, FeatureSet from arcgis.features import GeoAccessor, GeoSeriesAccessor portal_url = 'https://www.arcgis.com' # connect to your GIS (replace the placeholder credentials) user_name = '<user_name>' password = '<password>' my_gis = GIS(portal_url, user_name, password) ###Output _____no_output_____ ###Markdown We will see how to create layer for origins and destinations when we have latitude and longitude and when we have addresses to geocode for converting to layer respectively. Create origins layer: We have latitude and longitude information for origins, with the following code snippet, we can create a layer from the information. We will reverse geocode the latitude longitude information to find the locations.**Note**: Geocoding the addresses will consume [credits](https://www.esri.com/en-us/arcgis/products/arcgis-online/pricing/credits). ###Code origin_coords = ['-117.187807, 33.939479', '-117.117401, 34.029346'] origin_features = [] for origin in origin_coords: reverse_geocode = geocoding.reverse_geocode({"x": origin.split(',')[0], "y": origin.split(',')[1]}) origin_feature = Feature(geometry=reverse_geocode['location'], attributes=reverse_geocode['address']) origin_features.append(origin_feature) origin_fset = FeatureSet(origin_features, geometry_type='esriGeometryPoint', spatial_reference={'latestWkid': 4326}) origin_fset ###Output _____no_output_____ ###Markdown Create destinations layer: We have address information for the destinations in a csv file, with the following code snippet, we can geocode the addresses to create a destination layer. You could [batch geocode](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.geocoding.htmlbatch-geocode) the addresses if you have a list of addresses. **Note**: Geocoding the addresses will consume credits.
###Code # Read csv files from data: destinations_address = r"data/destinations_address.csv" destinations_df = pd.read_csv(destinations_address) # geocode the "Address" column of the DataFrame just read (was: undefined name `destinations_fc`) destinations_sdf = pd.DataFrame.spatial.from_df(destinations_df, "Address") destinations_sdf.head() destinations_fset = destinations_sdf.spatial.to_featureset() destinations_fset ###Output _____no_output_____ ###Markdown With these inputs, solve the problem with Origin Destination matrix solver. Look up [the doc](https://developers.arcgis.com/rest/network/api-reference/origin-destination-cost-matrix-service.htm) to understand how this tool works and its parameters. Remember, `0.0005` credits per input origin and destination pair will be charged. For example, if there are `100` origins and `200` destinations, the cost will be `10` credits. If you specify a cutoff or limit the number of destinations, for instance, to find only `5` closest destinations within `10` minutes of every origin, the cost will still be `10` credits, as the credits depend on the number of input origin destination pairs. `TargetDestinationCount`- The maximum number of destinations that must be found for the origin. If a value is not specified, the value from the Number of Destinations to Find parameter is used. `Cutoff`- Specify the travel time or travel distance value at which to stop searching for destinations from the origin. Any destination beyond the cutoff value will not be considered. The value needs to be in the units specified by the Time Units parameter if the impedance attribute in your travel mode is time based or in the units specified by the Distance Units parameter if the impedance attribute in your travel mode is distance based. If a value is not specified, the tool will not enforce any travel time or travel distance limit when searching for destinations.Specify `origin_destination_line_shape` to see the output in map.
Even though the lines are straight for performance reasons, they always store the travel time and travel distance along the street network, not straight-line distance. ###Code %%time # solve OD cost matrix tool for the origns and destinations from arcgis.network.analysis import generate_origin_destination_cost_matrix results = generate_origin_destination_cost_matrix(origins= origin_fset, #origins_fc_latlong, destinations= destinations_fset, #destinations_fs_address, cutoff=200, origin_destination_line_shape='Straight Line') print('Analysis succeeded? {}'.format(results.solve_succeeded)) ###Output Analysis succeeded? True CPU times: user 379 ms, sys: 27.1 ms, total: 406 ms Wall time: 24.7 s ###Markdown Let's see the output lines table. ###Code od_df = results.output_origin_destination_lines.sdf od_df ###Output _____no_output_____ ###Markdown Convert to matrix formatWe need to change the format to get a matrix with rows as origins and columns as destinations, with impedance value as travel time or travel distance. We will use the `pivot_table` feature of Pandas to accomplish that. ###Code # filter only the required columns od_df2 = od_df[['DestinationOID','OriginOID','Total_Distance','Total_Time']] # user pivot_table od_pivot = od_df2.pivot_table(index='OriginOID', columns='DestinationOID') od_pivot ###Output _____no_output_____ ###Markdown Write the pivot table to disk ###Code od_pivot.to_csv('data/OD_Matrix.csv') ###Output _____no_output_____ ###Markdown This is how we can get OD cost matrix when we have csv files with origin and destinations location information. We could read this matrix and provide this as input to a heuristics or an open-source algorithm. 
###Code od_map = my_gis.map('Loma Linda, CA') od_map od_map.draw(results.output_origin_destination_lines) od_map.draw(destinations_fset, symbol={"type": "esriSMS","style": "esriSMSSquare","color": [255,115,0,255], "size": 10}) od_map.draw(origin_fset, symbol={"type": "esriSMS","style": "esriSMSCircle","color": [76,115,0,255],"size": 8}) ###Output _____no_output_____ ###Markdown Calculating Origin Destinations nXn Matrix given set of origins and destinations Table of ContentsOrigin Destinations nXn Matrix given set of origins and destinationsCreate origins layer:Get destinations layer:Convert to matrix formatConclusion The [Origin Destination(OD) Cost Matrix service](http://desktop.arcgis.com/en/arcmap/latest/extensions/network-analyst/od-cost-matrix.htm) helps you to create an OD cost matrix for multiple `origins` to multiple `destinations`. An OD cost matrix is a table that contains cost, such as travel time or travel distance, from each origin to each destination. Additionally, it ranks the destinations in ascending order based on the minimum cost required to travel. When generating an OD cost matrix, you can optionally specify the maximum number of destinations to find for each origin and the maximum time or distance to travel when searching for destinations.By default, the matrix is generated with columns - origin id, destination id, destination rank, total time and total distance. In this sample notebook , we will use this tool to get OD matrix if given a set of origin and destination points, either as a csv with latitude and longitude or csv file with list of addresses. In later part of this sample, we will format the table to get n by n matrix.This is useful when you want to solve other transportation problems with open source tools or heuristics. 
When it comes to real world TSP(Travelling Salesman Problem) or VRP(Vehicle Routing Problem) or other transportation problems, data about travel time from every point to every other point can give you more realistic results than with euclidean distance. **Note**: If you run the tutorial using ArcGIS Online, 0.003 [credit](https://www.esri.com/en-us/arcgis/products/arcgis-online/pricing/credits) will be consumed as there are 6 origin-destination pairs. As a first step, let's import required libraries and establish a connection to your organization which could be an ArcGIS Online organization or an ArcGIS Enterprise. If you don't have an ArcGIS account, [get ArcGIS Trial](https://www.esri.com/en-us/arcgis/trial). ###Code import arcgis from arcgis.gis import GIS import pandas as pd import datetime import getpass from IPython.display import HTML from arcgis import geocoding from arcgis.features import Feature, FeatureSet from arcgis.features import GeoAccessor, GeoSeriesAccessor my_gis = GIS('home') ###Output _____no_output_____ ###Markdown We will see how to create layer for origins and destinations when we have latitude and longitude and when we have addresses to geocode for converting to layer respectively. Create origins layer: We have latitude and longitude information for origins, with the following code snippet, we can create a layer from the information. We will reverse geocode the latitude longitude information to find the locations.**Note**: Geocoding the addresses will consume [credits](https://www.esri.com/en-us/arcgis/products/arcgis-online/pricing/credits).
###Code origin_coords = ['-117.187807, 33.939479', '-117.117401, 34.029346'] origin_features = [] for origin in origin_coords: reverse_geocode = geocoding.reverse_geocode({"x": origin.split(',')[0], "y": origin.split(',')[1]}) origin_feature = Feature(geometry=reverse_geocode['location'], attributes=reverse_geocode['address']) origin_features.append(origin_feature) origin_fset = FeatureSet(origin_features, geometry_type='esriGeometryPoint', spatial_reference={'latestWkid': 4326}) origin_fset ###Output _____no_output_____ ###Markdown Get destinations layer: ###Code addresses_item = my_gis.content.search('destinations_address', 'feature layer')[0] addresses_item destinations_sdf = addresses_item.layers[0].query(as_df=True) destinations_sdf destinations_fset = destinations_sdf.spatial.to_featureset() destinations_fset ###Output _____no_output_____ ###Markdown With these inputs, solve the problem with Origin Destintion matrix solver. Look up [the doc](https://developers.arcgis.com/rest/network/api-reference/origin-destination-cost-matrix-service.htm) to understand how this tool works and its parameters. Remember, `0.0005` credits per input origin and destination pair will be charged. For example, if there are `100` origins and `200` destinations, the cost will be `10` credits. If you specify a cutoff or limit the number of destinations, for instance, to find only `5` closest destinations within `10` minutes of every origin, the cost will still be `10` credits, as the credits depend on the number of input origin destination pairs. `TargetDestinationCount`- The maximum number of destinations that must be found for the origin. If a value is not specified, the value from the Number of Destinations to Find parameter is used. `Cutoff`- Specify the travel time or travel distance value at which to stop searching for destinations from the origin. Any destination beyond the cutoff value will not be considered. 
The value needs to be in the units specified by the Time Units parameter if the impedance attribute in your travel mode is time based or in the units specified by the Distance Units parameter if the impedance attribute in your travel mode is distance based. If a value is not specified, the tool will not enforce any travel time or travel distance limit when searching for destinations.Specify `origin_destination_line_shape` to see the output in map. Even though the lines are straight for performance reasons, they always store the travel time and travel distance along the street network, not straight-line distance. ###Code %%time # solve OD cost matrix tool for the origns and destinations from arcgis.network.analysis import generate_origin_destination_cost_matrix results = generate_origin_destination_cost_matrix(origins= origin_fset, #origins_fc_latlong, destinations= destinations_fset, #destinations_fs_address, cutoff=200, origin_destination_line_shape='Straight Line') print('Analysis succeeded? {}'.format(results.solve_succeeded)) ###Output Analysis succeeded? True CPU times: user 379 ms, sys: 27.1 ms, total: 406 ms Wall time: 24.7 s ###Markdown Let's see the output lines table. ###Code od_df = results.output_origin_destination_lines.sdf od_df ###Output _____no_output_____ ###Markdown Convert to matrix formatWe need to change the format to get a matrix with rows as origins and columns as destinations, with impedance value as travel time or travel distance. We will use the `pivot_table` feature of Pandas to accomplish that. 
###Code # filter only the required columns od_df2 = od_df[['DestinationOID','OriginOID','Total_Distance','Total_Time']] # user pivot_table od_pivot = od_df2.pivot_table(index='OriginOID', columns='DestinationOID') od_pivot ###Output _____no_output_____ ###Markdown Write the pivot table to disk ###Code od_pivot.to_csv('data/OD_Matrix.csv') ###Output _____no_output_____ ###Markdown This is how we can get OD cost matrix when we have csv files with origin and destinations location information. We could read this matrix and provide this as input to a heuristics or an open-source algorithm. ###Code od_map = my_gis.map('Loma Linda, CA') od_map od_map.draw(results.output_origin_destination_lines) od_map.draw(destinations_fset, symbol={"type": "esriSMS","style": "esriSMSSquare","color": [255,115,0,255], "size": 10}) od_map.draw(origin_fset, symbol={"type": "esriSMS","style": "esriSMSCircle","color": [76,115,0,255],"size": 8}) ###Output _____no_output_____
Cognitive_Systems-Mathematics_and_Methods/week02/Assignment_2.ipynb
###Markdown Visualizing the data ###Code drone_delivery_df.plot.scatter('x', 'y', s=2).set_title( 'Drone delivery destinations'); ###Output _____no_output_____ ###Markdown Looks like a forest after an emergency landing... :) ###Code from sklearn import cluster from ipywidgets import interact, fixed, widgets, interactive import matplotlib.pyplot as plt ###Output _____no_output_____ ###Markdown Attaching closest depot label to the destinations (for 3 clusters by k-means) ###Code clustered_3means = cluster.KMeans(3).fit(drone_delivery_df) drone_delivery_df_cluster_info = drone_delivery_df.copy() drone_delivery_df_cluster_info['cluster'] = clustered_3means.labels_ drone_delivery_df_cluster_info.head(10) def plot_clusters(df, clusters, title=None): plt.scatter(df.x, df.y, s=2, c=clusters.labels_, cmap='tab20') if hasattr(clusters, "cluster_centers_"): plt.scatter( clusters.cluster_centers_[:,0], clusters.cluster_centers_[:,1], s=80, c='red', alpha=0.5 ) if title: plt.title(title) plot_clusters(drone_delivery_df, clustered_3means, 'Near-optimal depots for 3 clusters') ###Output _____no_output_____ ###Markdown Playing a bit more with the number of depots ###Code def compute_and_show_kmeans(n_clusters, df): kmeans = cluster.KMeans(n_clusters).fit(df) plot_clusters(df, kmeans) interact(compute_and_show_kmeans, n_clusters=(1, 20), df=fixed(drone_delivery_df)); ###Output _____no_output_____ ###Markdown The computational time is quite noticable with higher amount of clusters, specifically: ###Code for i in range(3, 24, 2): print(f'Time taken for {i} clusters:') %timeit -r 2 -n 5 cluster.KMeans(i).fit(drone_delivery_df) print() ###Output Time taken for 3 clusters: 83.6 ms ยฑ 6.52 ms per loop (mean ยฑ std. dev. of 2 runs, 5 loops each) Time taken for 5 clusters: 103 ms ยฑ 1.86 ms per loop (mean ยฑ std. dev. of 2 runs, 5 loops each) Time taken for 7 clusters: 160 ms ยฑ 5.06 ms per loop (mean ยฑ std. dev. 
of 2 runs, 5 loops each) Time taken for 9 clusters: 264 ms ยฑ 4.65 ms per loop (mean ยฑ std. dev. of 2 runs, 5 loops each) Time taken for 11 clusters: 283 ms ยฑ 7.42 ms per loop (mean ยฑ std. dev. of 2 runs, 5 loops each) Time taken for 13 clusters: 327 ms ยฑ 47.1 ms per loop (mean ยฑ std. dev. of 2 runs, 5 loops each) Time taken for 15 clusters: 362 ms ยฑ 19.5 ms per loop (mean ยฑ std. dev. of 2 runs, 5 loops each) Time taken for 17 clusters: 394 ms ยฑ 13.9 ms per loop (mean ยฑ std. dev. of 2 runs, 5 loops each) Time taken for 19 clusters: 482 ms ยฑ 22 ms per loop (mean ยฑ std. dev. of 2 runs, 5 loops each) Time taken for 21 clusters: 430 ms ยฑ 11 ms per loop (mean ยฑ std. dev. of 2 runs, 5 loops each) Time taken for 23 clusters: 440 ms ยฑ 19.8 ms per loop (mean ยฑ std. dev. of 2 runs, 5 loops each) ###Markdown I quite enjoyed exploring the behaviour of other algorithms, see below... ###Code # preparation for interactive input n_clusters_widget = widgets.IntSlider( description='Clusters', value=10, min=1, max=20, step=1, continuous_update=False, ) options = { "Agglomeration": ( cluster.AgglomerativeClustering, dict( n_clusters=n_clusters_widget, affinity=widgets.ToggleButtons( description='Affinity', options=['euclidean', 'l1', 'l2', 'manhattan', 'cosine', 'precomputed']), linkage=widgets.ToggleButtons( description='Linkage', options=['ward', 'complete', 'average', 'single']), ) ), "Birch": ( cluster.Birch, dict( n_clusters=n_clusters_widget, threshold=widgets.FloatSlider( description='Threshold', min=0, max=400, value=0.5, step=0.1, continuous_update=False), ) ), "K-Means": ( cluster.KMeans, dict(n_clusters=n_clusters_widget) ), } algo_choice = widgets.ToggleButtons(options=options.keys(), description='Clustering') output = widgets.Output() def get_clusterer_and_widgets(): algo_key = algo_choice.value return options[algo_key] with output: previous_settings = None def redraw_output(clear=True): if clear: output.clear_output() display(algo_choice) _, widgets = 
get_clusterer_and_widgets() for widget in widgets.values(): display(widget) def compute_and_show(): global previous_settings clusterer, widgets = get_clusterer_and_widgets() kwargs = {key:widget.value for key, widget in widgets.items()} # usually there are many notifications of the same event # no idea why, therefore comparing the states current_settings = (clusterer, kwargs) if current_settings != previous_settings: previous_settings = current_settings clusters = clusterer(**kwargs).fit(drone_delivery_df) redraw_output() plot_clusters(drone_delivery_df, clusters) def observe_widgets(): handler = lambda _: compute_and_show() algo_choice.observe(handler) unique_widgets = { widget for r, widget_dict in options.values() for widget in widget_dict.values() } for widget in unique_widgets: widget.observe(handler) observe_widgets() redraw_output(clear=False) display(output) ###Output _____no_output_____
notebooks/05.03-OPTIONAL-Widget_Events_2_--_Separating_Concerns.ipynb
###Markdown *OPTIONAL* Separating the logic from the widgetsA key principle in designing a graphical user interface is to separate the logic of an application from the graphical widgets the user sees. For example, in the super-simple password generator widget, the basic logic is to construct a sequence of random letters given the length. Let's isolate that logic in a function, without any widgets. This function takes a password length and returns a generated password string. ###Code def calculate_password(length): """Return a password of `length` randomly chosen ASCII letters.""" import string import secrets # Generate a list of random letters of the correct length. password = ''.join(secrets.choice(string.ascii_letters) for _ in range(length)) return password ###Output _____no_output_____ ###Markdown Test out the function a couple times in the cell below with different lengths. Note that unlike our first pass through this, you can test this function without defining any widgets. This means you can write tests for just the logic, use the function as part of a library, etc. ###Code calculate_password(10) ###Output _____no_output_____ ###Markdown The Graphical ControlsThe code to build the graphical user interface widgets is the same as the previous iteration.
###Code helpful_title = widgets.HTML('Generated password is:') password_text = widgets.HTML('No password yet') password_text.layout.margin = '0 0 0 20px' password_length = widgets.IntSlider(description='Length of password', min=8, max=20, style={'description_width': 'initial'}) password_widget = widgets.VBox(children=[helpful_title, password_text, password_length]) password_widget ###Output _____no_output_____ ###Markdown Connecting the logic to the widgetsWhen the slider `password_length` changes, we want to call `calculate_password` to come up with a new password, and set the value of the widget `password_text` to the return value of the function call.`update_password` takes the change from the `password_length` as its argument and sets the `password_text` with the result of `calculate_password`. ###Code def update_password(change): length = int(change.new) new_password = calculate_password(length) # NOTE THE LINE BELOW: it relies on the password widget already being defined. password_text.value = new_password password_length.observe(update_password, names='value') ###Output _____no_output_____ ###Markdown Now that the connection is made, try moving the slider and you should see the password update. ###Code password_widget ###Output _____no_output_____ ###Markdown Benefits of separating concernsSome advantages of this approach are:+ Changes in `ipywidgets` only affect your controls setup.+ Changes in functional logic only affect your password generation function. If you decide that a password with only letters isn't secure enough and decide to add some numbers and/or special characters, the only code you need to change is in the `calculate_password` function.+ You can write unit tests for your `calculate_password` function -- which is where the important work is being done -- without doing in-browser testing of the graphical controls. Using interactNote that using interact to build this GUI also emphasizes the separation between the logic and the controls.
However, interact also is much more opinionated about how the controls are laid out: controls are in a vbox above the output of the function. Often this is great for a quick initial GUI, but is restrictive for more complex GUIs. ###Code from ipywidgets import interact from IPython.display import display interact(calculate_password, length=(8, 20)); ###Output _____no_output_____ ###Markdown We can make the interact a bit nicer by printing the result, rather than just returning the string. This time we use `interact` as a decorator. ###Code @interact(length=(8, 20)) def print_password(length): print(calculate_password(length)) ###Output _____no_output_____
code/notebooks/render_tree.ipynb
###Markdown Prepare tree rendering package This notebook manipulates a tree and generates rendering packages. It does: - Collapse a tree at given taxonomic rank(s) based on several criteria. - Generate color gradient for branch support values. - Generate files that can be directly parsed and rendered using [**iTOL**](https://itol.embl.de/) and [**FigTree**](http://tree.bio.ed.ac.uk/software/figtree/). Preparation Dependencies ###Code import re import numpy as np import pandas as pd from skbio import TreeNode ###Output _____no_output_____ ###Markdown Input files Tree file (with node IDs, and without support values) ###Code tree_fp = '../trees/release/astral.cons.nwk' ###Output _____no_output_____ ###Markdown Taxonomic information file (original or tax2tree-curated) ###Code taxonomy_fp = '../taxonomy/tax2tree/ncbi/astral/filled_ranks.tsv' ###Output _____no_output_____ ###Markdown Custom node attributes (bootstrap, estimated time range, metadata category, additional name, etc.) ###Code custom_attrs_fps = { 'lpp': '../trees/release/supports/astral.txt'} ###Output _____no_output_____ ###Markdown Parameters Collapse the tree from this rank up. For example, "class" will have the tree collapsed at class (if possible) or phylum. Leave empty or None if not needed. ###Code # collapse_rank = None collapse_rank = 'genus' ###Output _____no_output_____ ###Markdown Determine the visual length of a collapsed clade (triangle or line). Options are: mean, std (don't use), min, 25%, 50% (median), 75% and max. ###Code collapse_length_func = '50%' ###Output _____no_output_____ ###Markdown Clades with descendants less than this threshold will not be collapsed. Either a fixed number, or a rank-to-number dictionary. Example: phylum = 1, class = 10. Leave 0 if not needed.
###Code # min_clade_size = 0 # for full-scale (10k-taxon) trees min_clade_size = {'kingdom': 1, 'phylum': 1, 'class': 5, 'order': 50, 'family': 50, 'genus': 50, 'species': 50} # for 1k-taxon trees # min_clade_size = {'kingdom': 1, 'phylum': 1, 'class': 1, 'order': 5, 'family': 10, 'genus': 10, 'species': 10} # for class / phylum only: # min_clade_size = {'kingdom': 1, 'phylum': 1, 'class': 10, 'order': 0, 'family': 0, 'genus': 0, 'species': 0} ###Output _____no_output_____ ###Markdown Split clades with descendants less than this threshold of *fraction* of the dominant clade of the same taxon will not be collapsed. For example, `Firmicutes_1` has 1000 tips, then if `Firmicutes_10` has 45 tips (< 1000 * 5%), it will not be collapsed. ###Code # min_split_clade_frac = 0 min_split_clade_frac = 0.05 ###Output _____no_output_____ ###Markdown Whether to delete tips not belonging to any collapsed clades. ###Code delete_uncollapsed = True ###Output _____no_output_____ ###Markdown Whether to hide uncollapsed tip names. Effective when `delete_uncollapsed` is `False`. ###Code hide_uncollapsed = True ###Output _____no_output_____ ###Markdown Manipulate node labels using the following regular expressions (pairs of pattern and replacement). ###Code label_format_regexes = [ (r'^Candidatus ', r'Ca. '), (r'^candidate division ', r'Ca. ') ] ###Output _____no_output_____ ###Markdown Append rank code to taxon (e.g.,: `Bacteria` => `k__Bacteria`). ###Code append_rank_code = True ###Output _____no_output_____ ###Markdown Append clade size to taxon ###Code append_clade_size = True ###Output _____no_output_____ ###Markdown Low and high end of color gradient. 
###Code color_range = ('#f0f0f0', '#191919') # gray # color_range = ('#deebf7', '#3182bd') # blue ###Output _____no_output_____ ###Markdown Helpers Basic utilities ###Code def sort_node_ids(d): """Sort names of tips and internal nodes.""" return sorted(d, key=lambda x: (x[0], int(x[1:]))) def digits(num): """Get number digits after decimal point.""" if not num.replace('.', '').isdigit() or num.count('.') != 1: raise ValueError('Not a valid float number: %s' % num) return len(num.split('.')[1]) def de_suffix(taxon, names): """Restore suffixed taxon name.""" if '_' not in taxon: return taxon res = '_'.join(taxon.split('_')[:-1]) return res if res in names else taxon ###Output _____no_output_____ ###Markdown Node dimension calculation ###Code def get_clade_dimensions(node): """Calculate the dimensions of a clade. Parameters ---------- node : skbio.TreeNode clade to calculate Returns ------- pd.Series count, mean, std, min, 25%, 50%, 75%, max """ lengths = pd.Series(x.accumulate_to_ancestor(node) for x in node.tips()) return lengths.describe() ###Output _____no_output_____ ###Markdown Selective tree shearing and pruning ###Code def selective_prune(tree, tips_to_keep, nodes_to_keep=[]): """Shear a tree and selectively prune it. Parameters ---------- tree : skbio.TreeNode tree to shear tips_to_keep : iterable of str tip names to keep nodes_to_keep : iterable of str internal node names to keep Returns ------- tree : skbio.TreeNode resulting tree Notes ----- Inherited from scikit-bio's `shear` and `prune` functions, but will selectively remove internal nodes. 
""" tcopy = tree.deepcopy() ids = set(tips_to_keep) marked = set() for tip in tcopy.tips(): if tip.name in ids: marked.add(tip) for anc in tip.ancestors(): if anc in marked: break else: marked.add(anc) for node in list(tcopy.traverse()): if node not in marked: node.parent.remove(node) ids = set(nodes_to_keep) nodes_to_remove = [] for node in tcopy.traverse(include_self=False): if len(node.children) == 1: if node.name not in ids: nodes_to_remove.append(node) for node in nodes_to_remove: child = node.children[0] if child.length is None or node.length is None: child.length = child.length or node.length else: child.length += node.length if node.parent is None: continue node.parent.append(child) node.parent.remove(node) return tcopy ###Output _____no_output_____ ###Markdown Newick string formatting ###Code def format_newick(tree, operators=',:_;()[] ', digits=None): """Generate a Newick string from a tree. Parameters ---------- tree : skbio.TreeNode tree to convert to a Newick string operators : str list of characters that have special meaning in a tree file so that a node name containing any of them must be quoted digits : int or tuple of (int, int) number of digits (float and scientific) to print in a branch length Returns ------- str formatted Newick string Notes ----- Modified from scikit-bio's `_tree_node_to_newick`. In addition to the prototype, it can do: 1. Keep spaces without converting them to underscores. 2. Print branch lengths based on given precision. 
""" res = '' operators = set(operators or '') if isinstance(digits, int): digits = (digits, digits) current_depth = 0 nodes_left = [(tree, 0)] while len(nodes_left) > 0: entry = nodes_left.pop() node, node_depth = entry if node.children and node_depth >= current_depth: res += '(' nodes_left.append(entry) nodes_left += ((child, node_depth + 1) for child in reversed(node.children)) current_depth = node_depth + 1 else: if node_depth < current_depth: res += ')' current_depth -= 1 if node.name: escaped = "%s" % node.name.replace("'", "''") if any(t in operators for t in node.name): res += "'" res += escaped res += "'" else: res += escaped if node.length is not None: res += ':' length = str(node.length) if digits: length = '%.*g' % ((digits[0] if 'e' in length else digits[1]), node.length) res += length if nodes_left and nodes_left[-1][1] == current_depth: res += ',' return res + ';' ###Output _____no_output_____ ###Markdown Color gradient generation ###Code def hex2rgb(h): return tuple(int(h.lstrip('#')[i: i + 2], 16) for i in (0, 2 ,4)) def rgb2hex(r): return '#{:02x}{:02x}{:02x}'.format(r[0], r[1], r[2]) def make_color_palette(start, end, n=101): """Generate a gradient of 101 colors. Parameters ---------- start : str start color in hex format end : str end color in hex format n : int number of colors to return Returns ------- list of str colors in hex format """ start_, end_ = hex2rgb(start), hex2rgb(end) seqs = [np.linspace(start_[i], end_[i], n).astype(int) for i in range(3)] rgbs = [[seqs[x][i] for x in range(3)] for i in range(n)] return [rgb2hex(x) for x in rgbs] def make_color_gradient(node2val, colors): """Deal with polytomic taxa. 
Parameters ---------- node2val : dict of float or int node ID to value map colors : list of str 101 colors for values of 0 to 100 Returns ------- dict of str node ID to color map """ for id_, val in node2val.items(): if val is None or np.isnan(val) or val == '': node2val[id_] = 0 elif not isinstance(val, int) and not isinstance(val, float): raise ValueError('Invalid number %s.' % val) # shrink larger integers to 0-100 range max_val = max(node2val.values()) if max_val > 100: for id_ in node2val: node2val[id_] /= (max_val / 100) # convert fraction into percentage, and percentage to integer convert = True if max_val <= 1 else False for id_ in node2val: try: node2val[id_] = ( int(node2val[id_] * 100) if convert else int(node2val[id_])) except ValueError: print('%s' % id_) # map support to color return {k: colors[v] for k, v in node2val.items()} ###Output _____no_output_____ ###Markdown iTOL file generation ###Code def write_itol_label(f, id2label): """Generate iTOL node label file.""" f.write('LABELS\n') f.write('SEPARATOR TAB\n') f.write('DATA\n') for id_ in sort_node_ids(id2label): f.write('%s\t%s\n' % (id_, id2label[id_])) def write_itol_collapse(f, nodes_to_collapse): """Generate an iTOL collapse file. Parameters ---------- nodes_to_collapse : iterable of str node IDs to collapse f : file handle file to write collapse information """ f.write('COLLAPSE\n') f.write('DATA\n') for id_ in nodes_to_collapse: f.write('%s\n' % id_) def write_itol_tree_colors(f, id2color, target='branch', label_or_style='normal', size='1'): """Generate an iTOL tree colors file. 
Parameters ---------- id2label : dict of str node ID to text map f : file handle file to write node texts target, label_or_style, size : str or dict of str iToL flavors, either a fixed value or a node ID to value map target == "type" in iTOL jargon """ f.write('TREE_COLORS\n') f.write('SEPARATOR TAB\n') f.write('DATA\n') # format: ID, target, color, label_or_style, size_factor for id_ in sort_node_ids(id2color): f.write('%s\t%s\t%s\t%s\t%s\n' % ( id_, target[id_] if isinstance(target, dict) else target, id2color[id_], label_or_style[id_] if isinstance( label_or_style, dict) else label_or_style, size[id_] if isinstance(size, dict) else size)) def write_itol_dataset_text(f, title, id2text, position='0.5', color='#000000', style='normal', size='1', rotation='0'): """Generate an iTOL text dataset file. Parameters ---------- id2label : dict of str node ID to text map f : file handle file to write node texts title : str title of this dataset position, color, style, size, rotation : str or dict of str iToL flavors, either a fixed value or a node ID to value map """ f.write('DATASET_TEXT\n') f.write('SEPARATOR TAB\n') f.write('DATASET_LABEL\t%s\n' % title) f.write('SHOW_INTERNAL\t1\n') f.write('DATA\n') # format: ID, label, position, color, style, size_factor, rotation for id_ in sort_node_ids(id2text): text = id2text[id_] if isinstance(text, float): text = '%.3g' % text f.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % ( id_, text, position[id_] if isinstance(position, dict) else position, color[id_] if isinstance(color, dict) else color, style[id_] if isinstance(style, dict) else style, size[id_] if isinstance(size, dict) else size, rotation[id_] if isinstance(rotation, dict) else rotation)) def write_itol_dataset_style(f, title, ids, target='branch', what='node', color='#000000', factor='normal', style='1', bgcolor=None): """Generate an iTOL style dataset file. 
Parameters ---------- f : file handle file to write node texts title : str title of this dataset ids : iterable of str node ID list target, what, color, factor, style, bgcolor : str or str or dict iToL flavors, either a fixed value or a node ID to value map """ f.write('DATASET_STYLE\n') f.write('SEPARATOR TAB\n') f.write('DATASET_LABEL\t%s\n' % title) f.write('COLOR\t#000000\n') f.write('DATA\n') # format: ID, target, what, color, factor, style, bgcolor for id_ in sort_node_ids(ids): f.write('%s\t%s\t%s\t%s\t%s\t%s' % ( id_, target[id_] if isinstance(target, dict) else target, what[id_] if isinstance(what, dict) else what, color[id_] if isinstance(color, dict) else color, factor[id_] if isinstance(factor, dict) else factor, style[id_] if isinstance(style, dict) else style)) if bgcolor is not None: f.write('\t%s' % bgcolor[id_] if isinstance(bgcolor, dict) else bgcolor) f.write('\n') ###Output _____no_output_____ ###Markdown FigTree file generation In a FigTree-compatible Nexus tree file, nodes (tips and internal nodes) and taxa may contain attributes in the following format:```(taxon1,taxon2)[&!name="Escherichia coli",support=90,range={80,95},!color=ff0000]:1.234,...``` Here "!name", "support", "range" and "!color" are the attributes. ###Code def make_figtree_attr_str(name, attr_db, attrs={}): """Generate a FigTree-compatible attribute string. Parameters ---------- name : str name of node or taxon to annotate attr_db : dict of dict of str map of names to attributes Returns ------- str formatted attribute string Notes ----- For example, attr_db = { '!name': {'N1': '"spA"', 'N2': '"spB"'...}, 'support': {'N1': '90', 'N2': '75'...} } For node "N2", the result will be `[&!name="spB",support=75]`. All values should be str. Strings should be double-quoted. Tuples should be written like `{"spA",0.95,#000000}`. Special FigTree-aware attributes such as "name", "color" and "collapse" should have a prefix `!`. 
""" for attr in attr_db: if name in attr_db[attr]: val = attr_db[attr][name] if val: # omit null or empty string attrs[attr] = val attr_strs = [] for attr, val in sorted(attrs.items(), key=lambda x: x[0]): attr_strs.append('%s=%s' % (attr, val)) return '[&%s]' % ','.join(attr_strs) if len(attr_strs) > 0 else '' def add_figtree_node_attrs(tree, node2attrs): """Add FigTree-compatible attributes to nodes of a tree. Parameters ---------- tree : skbio.TreeNode tree to add node attributes to node2attrs : dict of dict map of node names to attributes """ for node in tree.traverse(include_self=True): if not node.name: continue attrs = {} if node.is_tip() else {'id': '"%s"' % node.name} attr_str = make_figtree_attr_str(node.name, node2attrs, attrs) node.name = ('%s%s' % (node.name, attr_str) if node.is_tip() else attr_str) def add_figtree_taxon_attrs(tree, taxon2attrs): """Add FigTree-compatible attributes to the taxon labels. Parameters ---------- tree : skbio.TreeNode tree to add node attributes to taxon2attrs : dict of dict map of taxa to attributes Returns ------- list of str taxon labels with attributes appended """ res = [] for taxon in sorted(tree.subset()): attr_str = make_figtree_attr_str(taxon, taxon2attrs) res.append('%s%s' % (taxon, attr_str)) return res def write_figtree_nexus(tree, f, title='tree1', taxlabels=None): """Generate a FigTree-compatible Nexus tree file. 
Parameters ---------- tree : skbio.TreeNode tree to add node attributes to f : file handle file to write nexus tree title : str title of the tree taxlabels : list of str custom taxon labels to write """ f.write('#NEXUS\n') f.write('begin taxa;\n') f.write('\tdimensions ntax=%d;\n' % tree.count(tips=True)) f.write('\ttaxlabels\n') if taxlabels is None: taxlabels = sorted(tree.subset()) for taxon in taxlabels: f.write('\t%s\n' % taxon) f.write(';\n') f.write('end;\n') f.write('\n') f.write('begin trees;\n') f.write('\ttree %s = [&%s] ' % ( title, 'R' if len(tree.children) == 2 else 'U')) f.write(format_newick(tree, operators=None)) f.write('\n') f.write('end;\n') ###Output _____no_output_____ ###Markdown Pre-processing Read and process tree Read tree. ###Code tree = TreeNode.read(tree_fp) n, m = tree.count(), tree.count(tips=True) print('Tree has %d tips and %d internal nodes.' % (m, n - m)) tips = tree.subset() ###Output _____no_output_____ ###Markdown Convert null branch lengths to zero. ###Code for node in tree.traverse(include_self=False): node.length = node.length or 0.0 ###Output _____no_output_____ ###Markdown Get the precision (maximum number of float or scientific notion digits) of branch lengths. Will be useful in the correct formatting of branch lengths after collapsing the tree. ###Code max_f, max_e = 0, 0 for node in tree.traverse(): if node.length is not None: x = str(float(node.length)) if 'e' in x: max_e = max(max_e, digits(str(float(x.split('e')[0])))) else: max_f = max(max_f, digits(x)) max_f, max_e ###Output _____no_output_____ ###Markdown Calculate number of descendants of each node. 
###Code node2n = {} for node in tree.postorder(include_self=True): if node.is_tip(): node2n[node.name] = 1 else: node2n[node.name] = sum([node2n[x.name] for x in node.children]) ###Output _____no_output_____ ###Markdown Read and process taxonomy ###Code dfr = pd.read_csv(taxonomy_fp, sep='\t', index_col=0) dfr = dfr[dfr.index.isin(tips)] dfr.index.name = 'node' dfr.dropna().head(5) ranks = dfr.columns.tolist() ranks ###Output _____no_output_____ ###Markdown Tree annotation Generate node labels The lowest common ancestor (LCA) of genomes represented by each taxon will receive this taxon as the node label. One node may receive multiple taxa if they all meet this criterium.If this operation is applied to the tax2tree consensus strings (`consensus_ranks.tsv`), the outcome should match the labels decorated to the tree by tax2tree (`decorations_by_rank.tsv`).In the current analysis, the input file should be the tax2tree consensus string filled by taxa representing single genomes (`filled_ranks.tsv`). Therefore the outcome will contain more information. Both tips and internal nodes will be included. ###Code labels = {} for rank in ranks: for taxon in dfr[rank].value_counts().index: indices = dfr[dfr[rank] == taxon].index.tolist() node = (indices[0] if len(indices) == 1 else tree.lca(list(tips.intersection(indices))).name) labels.setdefault(node, {})[rank] = taxon dfl = pd.DataFrame.from_dict(labels, orient='index') dfl.index.name = 'node' dfl = dfl[ranks] dfl = dfl.loc[sorted(dfl.index, key=lambda x: (x[0], int(x[1:])))] dfl.head(3) ###Output _____no_output_____ ###Markdown Get the highest-rank name when multiple ranks have names in a node label. 
###Code def get_highest_taxon(row): """Get the highest taxon in a row.""" for rank in row.index: if pd.notnull(row[rank]): return rank, row[rank] return np.nan, np.nan dfl['hrank'], dfl['htaxon'] = zip(*dfl.apply(get_highest_taxon, axis=1)) dfl[['hrank', 'htaxon']].dropna().head(5) ###Output _____no_output_____ ###Markdown Collapse clades at or above a rank Identify the ranks to collapse. ###Code collapse_ranks = [] for rank in ranks: collapse_ranks.append(rank) if collapse_rank and rank == collapse_rank: break print('Collapse at the following ranks: %s.' % ', '.join(collapse_ranks)) ###Output Collapse at the following ranks: kingdom, phylum, class, order, family, genus. ###Markdown Generate a list of candidate nodes. ###Code df_can = dfl[dfl['hrank'].isin(collapse_ranks)][['hrank', 'htaxon']] df_can['size'] = df_can.index.to_series().map(node2n) df_can.head() ###Output _____no_output_____ ###Markdown Exclude nodes with number of descendants below threshold. ###Code if min_clade_size: to_keep = [] for row in df_can.itertuples(): th = min_clade_size[row.hrank] if isinstance(min_clade_size, dict) else min_clade_size if row.size >= th: to_keep.append(row.Index) df_can = df_can[df_can.index.isin(to_keep)] df_can.shape[0] df_ = df_can[df_can['htaxon'].str.contains('_\d+$', regex=True)].copy() ###Output _____no_output_____ ###Markdown Exclude split clades of the same taxon which has less than a fraction of the dominant clade. 
###Code if min_split_clade_frac > 0: df_ = df_can[df_can['htaxon'].str.contains('_\d+$', regex=True)].copy() df_['taxon'], df_['idx'] = zip(*df_['htaxon'].apply(lambda x: x[::-1]).str.split( '_', n=1).apply(lambda x: (x[1][::-1], x[0][::-1]))) top_clade_sizes = dict(df_.query('idx == "1"')[['taxon', 'size']].values.tolist()) df_ = df_[df_['size'] >= df_['taxon'].map(top_clade_sizes) * min_split_clade_frac] df_can = df_can[df_can.index.isin(df_.index) | ~df_can['htaxon'].str.contains('_\d+$', regex=True)] df_can.shape[0] ###Output _____no_output_____ ###Markdown Get the dimensions of clades represented by internal nodes. ###Code tips = tree.subset() dimensions = {x: get_clade_dimensions(tree.find(x)) for x in df_can.index if x not in tips} df_dim = pd.DataFrame.from_dict(dimensions, orient='index') df_dim = df_dim.loc[sorted(df_dim.index, key=lambda x: int(x[1:]))] df_dim.head() ###Output _____no_output_____ ###Markdown Determine which clades (as represented by nodes) should be collapsed. The rationale is: Start from the lowest rank, move up the hierarchy. If a node is already marked as "collapsed", all its ancestral nodes will be prohibited from being selected. ###Code nodes_to_collapse = [] nodes_to_skip = set() for rank in collapse_ranks[::-1]: for node in df_can[df_can['hrank'] == rank].index: if node not in nodes_to_skip: nodes_to_collapse.append(node) for anc in tree.find(node).ancestors(): nodes_to_skip.add(anc.name) print('Nodes to collapse: %d.' % len(nodes_to_collapse)) ###Output Nodes to collapse: 207. ###Markdown Calculate how many tips (genomes) are covered by the collapsed clades. ###Code tips_covered = set() for name in nodes_to_collapse: node = tree.find(name) tips = set([name]) if node.is_tip() else node.subset() if len(tips.intersection(tips_covered)) > 0: raise ValueError('Overlapping clades detected.') tips_covered.update(tips) tips_missed = tree.subset() - tips_covered print('Tips covered: %d. Tips missed: %d.' 
% (len(tips_covered), len(tips_missed))) ###Output Tips covered: 6944. Tips missed: 3631. ###Markdown Tree visualization Tree and labels manipulation Tree pruning Original tree dimensions. ###Code tree_tips = tree.subset() tree_nodes = set(x.name for x in tree.non_tips(include_self=True)) print('Original tree has %d tips and %d internal nodes.' % (len(tree_tips), len(tree_nodes))) ###Output Original tree has 10575 tips and 10574 internal nodes. ###Markdown Prune tree to include collapsed clades only. ###Code nodes_w_labels = [x for x in dfl['htaxon'].dropna().index if x in tree_nodes] tree1 = selective_prune(tree, tips_covered, nodes_w_labels) if collapse_rank and delete_uncollapsed else tree.copy() tree1_tips = tree1.subset() tree1_nodes = set(x.name for x in tree1.non_tips(include_self=True)) print('Output tree has %d tips and %d internal nodes.' % (len(tree1_tips), len(tree1_nodes))) ###Output Output tree has 6944 tips and 6996 internal nodes. ###Markdown Export pruned tree. ###Code with open('pruned_tree.nwk', 'w') as f: f.write('%s\n' % format_newick(tree1, operators=None)) ###Output _____no_output_____ ###Markdown Tree shrinking Generate a tree in which the collapsed clades are actually deleted. ###Code tree3 = tree1.copy() nodes_to_remove = [] for node in tree3.non_tips(): if node.name in nodes_to_collapse: node.length += df_dim[collapse_length_func][node.name] nodes_to_remove.extend(node.children) tree3.remove_deleted(lambda x: x in nodes_to_remove) tree3.prune() print('Collapsed tree has %d tips.' % tree3.count(tips=True)) with open('collapsed_tree.nwk', 'w') as f: f.write(format_newick(tree3, operators=None)) ###Output Collapsed tree has 207 tips. ###Markdown Node label formatting Format node label strings. 
###Code name_map = dfl['htaxon'].to_dict() if len(label_format_regexes) > 0: for id_ in name_map: for pattern, repl in label_format_regexes: name_map[id_] = re.sub(pattern, repl, name_map[id_]) if append_rank_code is True: for id_ in name_map: name_map[id_] = '%s__%s' % (dfl['hrank'][id_][0], name_map[id_]) if append_clade_size is True: for id_ in name_map: n = node2n[id_] if n > 1: name_map[id_] = '%s (%d)' % (name_map[id_], node2n[id_]) sorted(name_map.items())[:5] tip_name_map, node_name_map = {}, {} for id_, name in name_map.items(): if id_ in tree_tips: tip_name_map[id_] = name elif id_ in tree_nodes: node_name_map[id_] = name ###Output _____no_output_____ ###Markdown Additional attributes ###Code if custom_attrs_fps: dfa = {} for name, fp in custom_attrs_fps.items(): dfa[name] = pd.read_table(fp, index_col=0, names=[name]) ###Output _____no_output_____ ###Markdown FigTree file generation Generate FigTree tip and node name maps. ###Code figtree_tip_name_map = {k: '"%s"' % v for k, v in tip_name_map.items()} figtree_node_name_map = {k: '"%s"' % v for k, v in node_name_map.items()} if collapse_rank and hide_uncollapsed: for name in figtree_tip_name_map: if name in tips_missed: figtree_tip_name_map[name] = '""' ###Output _____no_output_____ ###Markdown Let FigTree display internal node labels without displaying labels of tips (including collapsed clades). ###Code no_labels = set(nodes_to_collapse).union(tree1_tips) figtree_label_map = {k: v for k, v in figtree_node_name_map.items() if k not in no_labels} tip2attrs = {'!name': figtree_tip_name_map} node2attrs = {'!name': figtree_node_name_map, 'label': figtree_label_map} ###Output _____no_output_____ ###Markdown Generate a FigTree collapse map. 
###Code figtree_collapse_map = {} tree_radius = max(x.accumulate_to_ancestor(tree1) for x in tree1.tips()) for name in nodes_to_collapse: if name not in tree1_tips: length = df_dim[collapse_length_func][name] height = tree_radius - tree1.find(name).accumulate_to_ancestor(tree1) - length figtree_collapse_map[name] = '{"collapsed",%.*g}' % (max_f, height) sorted(figtree_collapse_map.items())[:5] node2attrs['!collapse'] = figtree_collapse_map ###Output _____no_output_____ ###Markdown Generate FigTree size map. ###Code node2attrs['size'] = node2n ###Output _____no_output_____ ###Markdown Generate additional attributes for FigTree. ###Code if custom_attrs_fps: for name, df_ in dfa.items(): if np.issubdtype(df_[name], np.number): map_ = {k: str(v) for k, v in df_[name].iteritems()} else: map_ = {k: '"%s"' % v for k, v in df_[name].iteritems()} tip2attrs[name] = node2attrs[name] = map_ ###Output _____no_output_____ ###Markdown Write FigTree files. ###Code tree2 = tree1.copy() taxlabels = add_figtree_taxon_attrs(tree2, tip2attrs) add_figtree_node_attrs(tree2, node2attrs) with open('figtree.tre', 'w') as f: write_figtree_nexus(tree2, f, taxlabels=taxlabels) print('Task completed.') ###Output Task completed. ###Markdown iTOL files generation Step 1: Upload the already exported pruned tree file (Newick format) to iTOL. Write iTOL node label file. (Applies to both tips and internal nodes, including collapsed triangles.) ###Code with open('label.txt', 'w') as f: write_itol_label(f, {**node_name_map, **tip_name_map}) ###Output _____no_output_____ ###Markdown Write iTOL branch text file ###Code branch_name_map = {k: v for k, v in node_name_map.items() if k not in nodes_to_collapse} with open('branch_text.txt', 'w') as f: # position = 0.5: at the middle of branch write_itol_dataset_text(f, 'branch text', branch_name_map, position='0.5', size='1') ###Output _____no_output_____ ###Markdown Write iTOL collapse file. 
###Code if collapse_rank: with open('collapse.txt', 'w') as f: write_itol_collapse(f, sorted( x for x in nodes_to_collapse if x in tree1_nodes)) ###Output _____no_output_____ ###Markdown Write iTOL files for extra node attributes. ###Code color_gradient = make_color_palette(color_range[0], color_range[1]) if custom_attrs_fps: for name, df_ in dfa.items(): # node text with open('%s_node_text.txt' % name, 'w') as f: write_itol_dataset_text( f, '%s node text' % name, df_[name].to_dict(), position='1', size='1') # branch color gradient if np.issubdtype(df_[name], np.number): branch_color_map = make_color_gradient(df_[name].to_dict(), color_gradient) with open('%s_branch_color.txt' % name, 'w') as f: write_itol_dataset_style( f, '%s color gradient' % name, branch_color_map, target='branch', what='node', color=branch_color_map) print('Task completed!') ###Output Task completed!
Tedtalks data project_ss1078.ipynb
###Markdown 2553 rows ###Code # taking tags that have occurred more than 180 times to create columns count_vector = CountVectorizer(stop_words='english',min_df=180/len(data)) tag_array = count_vector.fit_transform(data.tags).toarray() tag_matrix = pd.DataFrame(tag_array, columns = count_vector.get_feature_names()) tag_matrix = tag_matrix.add_prefix('tags_') # append the columns obtained to the base data data = pd.concat([data,tag_matrix], axis=1) data=data.drop(['tags'], axis = 1) # drop tags column #list(data) data.head() # all date operations data['film_date'] = data['film_date'].apply(lambda x: datetime.date.fromtimestamp(int(x))) data['published_date'] = data['published_date'].apply(lambda x: datetime.date.fromtimestamp(int(x))) data['film_month'] = data['film_date'].apply(lambda x: x.month) data['pub_month'] = data['published_date'].apply(lambda x: x.month) data['film_weekday'] = data['film_date'].apply(lambda x: x.weekday()) # Monday: 0, Sunday: 6 data['pub_weekday'] = data['published_date'].apply(lambda x: x.weekday()) data[['film_date','published_date']].head() # pairplots between numerical variables to check for evident patterns and correlations nums = ['comments', 'duration', 'num_speaker', 'views'] sns.pairplot(data, vars=nums, size=3); sns.jointplot(x=data['languages'], y=data['views'], kind='reg').annotate(stats.pearsonr) ###Output _____no_output_____ ###Markdown There are a few videos which have high views despite common languages. These may be outliers which have high views due to some other factors. We will have to investigate those. ###Code sns.jointplot(x=data['views'], y=data['comments'], kind='reg').annotate(stats.pearsonr) ###Output _____no_output_____ ###Markdown There seems to be a very high correlation between comments and views as it is intuitive. 
###Code # check relation between duration, comments and views data_sorted=data.sort_values(by='views',ascending=True) df2=data_sorted.iloc[:20,:] df2.index=range(0,len(df2)) #visualization data_viz = [ { 'y': df2.views, 'x': df2.index, 'mode': 'markers', 'marker': { 'color': df2.duration, 'size': df2.comments, 'showscale': True }, "text" : df2.main_speaker } ] iplot(data_viz) ###Output _____no_output_____ ###Markdown Clearly, there are a few videos which have a low views and longer. This may be an important factor for the model ###Code data['event'].unique() data['event_category'] = data.event.apply(lambda x: "TEDx" if "TEDx" in x else ("TED" if "TED" in x else "Other")) data['event_category'].value_counts() data['duration']= data['duration']/60 # per minute data['transcript'] = data['transcript'].fillna('') data['wc_per_min'] = data['transcript'].apply(lambda x: len(x.split()))/data['duration'] data.head() data.shape nlp = spacy.load('en') feats = ['char_count', 'word_count', 'word_count_cln', 'stopword_count', '_NOUN', '_VERB', '_ADP', '_ADJ', '_DET', '_PROPN', '_INTJ', '_PUNCT', '_NUM', '_PRON', '_ADV', '_PART', '_amod', '_advmod', '_acl', '_relcl', '_advcl', '_neg','_PERSON','_NORP','_FAC','_ORG','_GPE','_LOC','_PRODUCT','_EVENT','_WORK_OF_ART','_LANGUAGE'] class text_features: def __init__(self, df, textcol): self.df = df self.textcol = textcol self.c = "spacy_" + textcol self.df[self.c] = self.df[self.textcol].apply( lambda x : nlp(x)) self.pos_tags = ['NOUN', 'VERB', 'ADP', 'ADJ', 'DET', 'PROPN', 'INTJ', 'PUNCT',\ 'NUM', 'PRON', 'ADV', 'PART'] self.dep_tags = ['amod', 'advmod', 'acl', 'relcl', 'advcl','neg'] self.ner_tags = ['PERSON','NORP','FAC','ORG','GPE','LOC','PRODUCT','EVENT','WORK_OF_ART','LANGUAGE'] def _spacy_cleaning(self, doc): tokens = [token for token in doc if (token.is_stop == False)\ and (token.is_punct == False)] words = [token.lemma_ for token in tokens] return " ".join(words) def _spacy_features(self): self.df["clean_text"] = 
self.df[self.c].apply(lambda x : self._spacy_cleaning(x)) self.df["char_count"] = self.df[self.textcol].apply(len) self.df["word_count"] = self.df[self.c].apply(lambda x : len([_ for _ in x])) self.df["word_count_cln"] = self.df["clean_text"].apply(lambda x : len(x.split())) self.df["stopword_count"] = self.df[self.c].apply(lambda x : len([_ for _ in x if _.is_stop])) self.df["pos_tags"] = self.df[self.c].apply(lambda x : dict(Counter([_.head.pos_ for _ in x]))) self.df["dep_tags"] = self.df[self.c].apply(lambda x : dict(Counter([_.dep_ for _ in x]))) self.df["ner_tags"] = self.df[self.c].apply(lambda x : dict(Counter([_.ent_type_ for _ in x]))) def _flatten_features(self): for key in self.pos_tags: self.df["_" + key] = self.df["pos_tags"].apply(lambda x : \ x[key] if key in x else 0) for key in self.dep_tags: self.df["_" + key] = self.df["dep_tags"].apply(lambda x : \ x[key] if key in x else 0) for key in self.ner_tags: self.df["_" + key] = self.df["ner_tags"].apply(lambda x : \ x[key] if key in x else 0) def generate_features(self): self._spacy_features() self._flatten_features() self.df = self.df.drop([self.c, "pos_tags", "dep_tags", 'ner_tags',"clean_text"], axis=1) return self.df def spacy_features(df, tc): fe = text_features(df, tc) return fe.generate_features() textcol = "transcript" transcript_features = spacy_features(data, textcol) transcript_features[[textcol] + feats].head() data['transcript'].str.count("(Laughter)") data['transcript'].str.count("(Applause)") data['laughter_count']=data['transcript'].str.count("(Laughter)") data['applaud_count']=data['transcript'].str.count("(Applause)") data.head() ###Output _____no_output_____
examples/notebooks/Distribute_Generic_Functions.ipynb
###Markdown Distribute functions across a BigQuery dataset using Spark Problem: As a PM, I give lots of public presentations and I want to make sure I use images that have an open license BigQuery Public Datasets - Open Images: 9 million URLs of open images (with labels across 6,000 categories) For smaller datasets, can use BigQuery magic and python ###Code %reload_ext google.cloud.bigquery %%bigquery pd_results --use_bqstorage_api SELECT original_url, title FROM `bigquery-public-data.open_images.images` WHERE license = 'https://creativecommons.org/licenses/by/2.0/' LIMIT 10 #review what our image database contains. import pandas as pd pd.set_option('display.max_colwidth', None) pd_results.head() ###Output _____no_output_____ ###Markdown Looks like a great set of images but how do I find what I need? What's a DSC-4918? ###Code #function that makes is super easy to abstract some high confidence labels about my image. from google.cloud import vision def AnnotateHighConfidenceLabelsFromImage(image_uri): client = vision.ImageAnnotatorClient() request = { 'image': { 'source': {'image_uri': image_uri}, }, } response = client.annotate_image(request) high_confidence_labels = [] for la in response.label_annotations: if float(la.score * 100) > 90.0: high_confidence_labels.append(la.description) if len(high_confidence_labels) < 1: high_confidence_labels.append("No labels detected") return str(high_confidence_labels) #for 10 images, no problem to simply loop through them to get the labels. 
for image in pd_results['original_url']: labels = AnnotateHighConfidenceLabelsFromImage(image) print(labels) ###Output ['No labels detected'] ['No labels detected'] ['Branch', 'Organism', 'Plant community', 'Monochrome photography', 'Monochrome', 'Adaptation'] ['Arm', 'Finger', 'People', 'Comfort', 'Hand', 'Child'] ['Electronic device', 'Furniture', 'Technology', 'Table', 'Laptop', 'Computer accessory', 'Computer'] ['Branch', 'Twig', 'Adaptation', 'Woody plant'] ['Organism', 'Bird'] ['Atmosphere', 'Cloud', 'Atmospheric phenomenon'] ['Dog breed', 'Dog', 'Carnivore', 'Mammal'] ['Text', 'White', 'Line', 'Font', 'Colorfulness'] ###Markdown Expanding to the full corpus of images will require scaling with Spark ###Code #but what happens when I need to run that label extractor against the full dataset of images. no_limit_query = "SELECT original_url, title FROM `bigquery-public-data.open_images.images` WHERE license = 'https://creativecommons.org/licenses/by/2.0/' LIMIT 100" # use Spark to load full dataset into Spark Dataframe. Setup Spark Session with BQ storage connector from pyspark.sql import SparkSession spark = SparkSession.builder.config("spark.jars.packages", "com.google.cloud.spark:spark-bigquery-with-dependencies_2." + str(12) + ":0.18.0") \ .enableHiveSupport() \ .getOrCreate() #Use this function to push the processing of the query back to BQ but still use BQ Storage Connector to #pull back data in parallel and directly into a Spark DF that can handle the size. from google.cloud import bigquery from pyspark import StorageLevel def bq2df(QUERY): bq = bigquery.Client() query_job = bq.query(QUERY) query_job.result() df = spark.read.format('bigquery') \ .option('dataset', query_job.destination.dataset_id) \ .load(query_job.destination.table_id) \ .persist(StorageLevel.MEMORY_AND_DISK) return df df = bq2df(no_limit_query) print(df.count()) df.printSchema() #I'm now going to Spark-ify my python function with no code changes. 
from pyspark.sql.functions import udf @udf("string") def AnnotateHighConfidenceLabelsFromImage_UDF(image_uri): from google.cloud import vision client = vision.ImageAnnotatorClient() request = { 'image': { 'source': {'image_uri': image_uri}, }, } response = client.annotate_image(request) high_confidence_labels = [] for la in response.label_annotations: if float(la.score * 100) > 90.0: high_confidence_labels.append(la.description) if len(high_confidence_labels) < 1: high_confidence_labels.append("No labels detected") return str(high_confidence_labels) df_results = df.select("original_url", "title",\ AnnotateHighConfidenceLabelsFromImage_UDF("original_url").alias("labels"))\ .cache() #at this point, might make sense to save this table out to my hive metastore to avoid re-processing all the images #df_results.write.saveAsTable("HighConfidenceLabelsAndImages") df_results.show(10, truncate=False) from pyspark.sql.functions import col df_results.where(col("labels").contains("Bird")).show(truncate=False) ###Output +----------------------------------------------------------------+-------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------+ |original_url |title |labels | +----------------------------------------------------------------+-------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------+ |https://farm4.staticflickr.com/1152/532400494_d65f8b7970_o.jpg |DSC_0451 |['Organism', 'Bird'] | |https://farm5.staticflickr.com/3711/11060890716_08737a2dd7_o.jpg|Pale-breasted spinetail brasso seco nov2013|['Branch', 'Bird', 'Twig'] | |https://c4.staticflickr.com/1/31/48416653_90f005725b_o.jpg |Thousand Oaks as seen from Ladyface Peak |['Landscape', 'Residential area', 'Aerial photography', 'Atmospheric phenomenon', "Bird's-eye view", 'Suburb', 
'Plain'] | |https://c1.staticflickr.com/1/33/66496718_d17cac35c8_o.jpg |Canyon Lands |['Branch', 'Sky', 'Twig', 'Bird'] | |https://c7.staticflickr.com/9/8616/16415108690_51ec731c1f_o.jpg |Sarphatipark @ De Pijp @ Amsterdam |['Body of water', 'Water', 'Vertebrate', 'Bird', 'Ducks, geese and swans', 'Pond', 'Waterway', 'Water bird', 'Channel', 'Watercourse']| +----------------------------------------------------------------+-------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------+
03_Combine_DFs_and_Munge.ipynb
###Markdown The objective of this notebook is:- Combine all data gathered so far- Only consider inforamation from 2009 and onwards- Drop duplicate CVEs- Remove "REJECT" entires and duplicates- Engineer features ready for machine learning algorithms- Split between train/test dataItems such as NLP text to numbers and normalization can only be done after train/test/split. ###Code import pandas as pd import numpy as np import glob import os import munge_help from sklearn.model_selection import train_test_split from sklearn.preprocessing import MinMaxScaler from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.utils.class_weight import compute_class_weight from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier #from sklearn.svm import SVC from sklearn.metrics import confusion_matrix,plot_roc_curve,plot_precision_recall_curve,plot_confusion_matrix, classification_report #from plot_help import plot_confusion_matrix from nltk.stem import WordNetLemmatizer import utils import xgboost as xgb import matplotlib.pyplot as plt %matplotlib inline ###Output _____no_output_____ ###Markdown Why lemmatization and not stemming?The goal of both stemming and lemmatization is to reduce inflectional forms and sometimes derivationally related forms of a word to a common base form.However, the two words differ in their flavor. Stemming usually refers to a crude heuristic process that chops off the ends of words in the hope of achieving this goal correctly most of the time, and often includes the removal of derivational affixes. 
Lemmatization usually refers to doing things properly with the use of a vocabulary and morphological analysis of words, normally aiming to remove inflectional endings only and to return the base or dictionary form of a word, which is known as the lemma .source:https://nlp.stanford.edu/IR-book/html/htmledition/stemming-and-lemmatization-1.html NVD Data ###Code nvd_data_path = os.path.join('data', 'nvdcve_combined.csv') df_nvd = pd.read_csv(nvd_data_path) #fix numbers that are strings df_nvd = munge_help.infer_dtypes(df_nvd) # from none/low/high to 0/1/2 df_nvd = munge_help.categorical_to_numerical(df_nvd) df_nvd.head() #assert no missing values assert df_nvd.isna().mean().sum()== 0 ###Output _____no_output_____ ###Markdown ExploitDB Data ###Code edb_data_path = os.path.join('data', 'exploitdb_metadata.csv') edb_df = pd.read_csv(edb_data_path, index_col=0) edb_df = munge_help.exploit_db_munger(edb_df) edb_df.head() assert edb_df.isna().mean().sum()==0 edb_df.shape ###Output _____no_output_____ ###Markdown Exploited in Wild ###Code wild_data_path = os.path.join('data', 'target_cve.csv') df_target = pd.read_csv(wild_data_path) df_target.head() ###Output _____no_output_____ ###Markdown Combine ###Code #join nvd and edb df_join = pd.merge(left=df_nvd, right=edb_df, how='left', left_on='ID', right_on='CVE') #drop right column after join df_join = df_join.drop(columns=['CVE']) #join next on target df_join = pd.merge(left=df_join, right=df_target, how='left', left_on='ID', right_on='ID') df_join.columns[df_join.isnull().any()] ###Output _____no_output_____ ###Markdown A lot of the NaN columns stem from the exploitdb type. In case of doubt we opt to fill those wih zeroes. 
###Code df_join = df_join.fillna(value=0) # TODO: only use dates 2009 and after # TODO: text to ngram # TODO: standardaize num values # TODO: look at correlation coefficients ###Output _____no_output_____ ###Markdown Cleaning up Joined DF Only published on or after 2009 ###Code #make a deep copy df_2009 = df_join.copy(deep=True) #get year CVE was published df_2009['year'] = df_2009['publishedDate'].dt.year #get only years after 2009 df_2009 = df_2009[df_2009['year']>=2009] #drop years column df_2009 = df_2009.drop(columns=['year']) ###Output _____no_output_____ ###Markdown Drop duplicate CVEs ###Code #make a deep copy df_dup = df_2009.copy(deep=True) #drop duplicate IDs df_dup = df_dup.drop_duplicates(subset='ID') print("Rows before dropping dups = {}".format(df_2009.shape[0])) print("Rows before dropping dups = {}".format(df_dup.shape[0])) ###Output Rows before dropping dups = 118957 Rows before dropping dups = 115798 ###Markdown Remove all CVEs that have been rejected by the description itself. ###Code #name it accept so that it contains only accepted entries after cleanup df_accept = df_dup.copy(deep=True) #see how many CVEs are rejected df_accept[df_accept['description'].str.contains("REJECT")].shape # see if we have CVEs that are rejected AND are exploited in the wild # call this a contradiction df_contradiction = df_accept[(df_accept['description'].str.contains("REJECT")) \ & (df_accept['in_the_wild'] == 1)] df_contradiction.shape df_contradiction['description'] ###Output _____no_output_____ ###Markdown Since the number of rows that are both rejected and we think are exploited in the wild is low (5), we decide to drop these rows. 
###Code #keep only accepted entires df_accept = df_accept[~df_accept['description'].str.contains("REJECT")] ###Output _____no_output_____ ###Markdown Remove object columns that we won't need ###Code obj_columns = df_accept.select_dtypes(exclude='number').columns print("Columns that are non-numeric:\n{}".format(obj_columns)) #make a deep copy df_num = df_accept.copy(deep=True) #drop columns that we won't need #note that we keep the description column df_num = df_num.drop(columns=['ID', 'cwe_val', 'vectorString_V3', 'vectorString_V2','publishedDate', 'lastModifiedDate', 'url', 'Date']) df_num.shape ###Output _____no_output_____ ###Markdown Train Test Split ###Code X = df_num.drop(columns=['in_the_wild']) y = df_num['in_the_wild'] print(X.shape) y.mean() X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.05, stratify=y, random_state=42) #quick check that stratification worked #check how positive in train print("Percent explited in wild in train data... {:.3%}".format(y_train.mean())) #check how positive in test print("Percent explited in wild in test data... {:.3%}".format(y_test.mean())) ###Output Percent explited in wild in train data... 1.113% Percent explited in wild in test data... 1.112% ###Markdown NLP Pipeline Feature Extractionmake use of TfidfVectorizer:Convert a collection of text documents to a matrix of token counts.This sklearn package is useful because it handles a lot of the preprocessing for us such as make everything lower. ###Code #isolate the descriptions description_train_raw = X_train['description'] #save the text column utils.save_obj(obj=description_train_raw, path=os.path.join('data_processed', 'description_train_raw.pkl')) #repeat for test data description_test_raw = X_test['description'] #save the text column utils.save_obj(obj=description_test_raw, path=os.path.join('data_processed', 'description_test_raw.pkl')) #instantiate the vectorizier # note that ngram only applies if analyzer is not callable. 
vectorizer = TfidfVectorizer(encoding='utf-8', decode_error='strict', strip_accents='ascii', #remove accents lowercase=True, #make everything lowercase before vectorizing preprocessor=None, tokenizer=None, analyzer='word', #feature to be made of word ngrams stop_words='english', #remove english stopwords #token_pattern='(?u)\b\w\w+\b', #keep this one default ngram_range=(1, 3), max_df=.8, #if appears in more than this percent don't use it min_df=10, #ignore terms that have a document frequency strictly lower than the given threshold max_features=100, #arbitrary number vocabulary=None, binary=False, norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=False) vectorizer.fit(X_train['description']) #see what the token pattern is #note that it is commented out when we instantiate the vectorizer #it could be a bug vectorizer.token_pattern #see what's in our vocab vectorizer.get_feature_names() ###Output _____no_output_____ ###Markdown Data Preprocessing preprocess train data ###Code #instantiate scaler scaler = MinMaxScaler(feature_range = (0,1)) #get numerical features X_train_num = X_train.drop(columns='description') #fit to train numerical data scaler.fit(X_train_num) #transform X_train_num = scaler.transform(X_train_num) #isolate text column X_train_text = X_train['description'] #apply tfidf transform X_train_text = vectorizer.transform(X_train_text) ###Output _____no_output_____ ###Markdown preprocess test data ###Code #get numerical features X_test_num = X_test.drop(columns='description') #scale test data X_test_num = scaler.transform(X_test_num) #isolate text column X_test_text = X_test['description'] #apply tfidf transform X_test_text = vectorizer.transform(X_test_text) #check shape X_train_num.shape X_train_text.shape ###Output _____no_output_____ ###Markdown How can we make tfidf vocab smaller? 
###Code #make a dataframe of idf weights df_idf = pd.DataFrame(vectorizer.idf_, index=vectorizer.get_feature_names(), columns=['idf_weights']) df_idf = df_idf.sort_values(by=['idf_weights'], ascending=False) df_idf.head(10) df_idf.tail(10) ###Output _____no_output_____ ###Markdown Save data and artifacts ###Code #train data utils.save_obj(obj = X_train, path = os.path.join('data_processed', 'X_train.pkl')) #train data utils.save_obj(obj = y_train, path = os.path.join('data_processed', 'y_train.pkl')) utils.save_obj(obj = X_train_num, path = os.path.join('data_processed', 'X_train_num.pkl')) utils.save_obj(obj = X_train_text, path = os.path.join('data_processed', 'X_train_text.pkl')) #test data utils.save_obj(obj = X_test, path = os.path.join('data_processed', 'X_test.pkl')) #test data utils.save_obj(obj = y_test, path = os.path.join('data_processed', 'y_test.pkl')) utils.save_obj(obj = X_test_num, path = os.path.join('data_processed', 'X_test_num.pkl')) utils.save_obj(obj = X_test_text, path = os.path.join('data_processed', 'X_test_text.pkl')) #transformers utils.save_obj(obj = vectorizer, path = os.path.join('artifacts', 'vectorizer_tfidf.pkl')) utils.save_obj(obj = scaler, path = os.path.join('artifacts', 'minmax_scaler.pkl')) ###Output _____no_output_____ ###Markdown Q: Are numeric features alone sufficient? 
Logistic Regression ###Code #instantiate with default params lr = LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=1.0, fit_intercept=True, intercept_scaling=1, class_weight='balanced', random_state=None, solver='lbfgs', max_iter=100, multi_class='auto', verbose=0, warm_start=False, n_jobs=-1, l1_ratio=None) lr.fit(X_train_num, y_train) y_lr_num = lr.predict(X_test_num) utils.metric_evaluation(lr, X_train_num, X_test_num, y_test, 'Logistic Regression Numbers Only') ###Output Confusion Matrix in array form [[4396 939] [ 19 41]] #################### ###Markdown Naive Bayes ###Code nb = GaussianNB(priors=None, var_smoothing=1e-09) nb.fit(X_train_num, y_train) y_nb_num=nb.predict(X_test_num) utils.metric_evaluation(nb, X_train_num, X_test_num, y_test, title = 'Naive Bayes Numbers Only') ###Output Confusion Matrix in array form [[ 139 5196] [ 0 60]] #################### ###Markdown Random forest ###Code rf = RandomForestClassifier(n_estimators=100, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='sqrt', max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=-1, random_state=None, verbose=0, warm_start=False, class_weight='balanced', ccp_alpha=0.0, max_samples=None) rf.fit(X_train_num, y_train) y_rf_num = rf.predict(X_test_num) utils.metric_evaluation(rf, X_train_num, X_test_num, y_test, title = 'Random Forest Numbers Only') ###Output Confusion Matrix in array form [[4811 524] [ 29 31]] #################### ###Markdown GBTree ###Code gb = GradientBoostingClassifier(loss='deviance', learning_rate=0.1, n_estimators=100, subsample=1.0, criterion='friedman_mse', min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_depth=3, min_impurity_decrease=0.0, min_impurity_split=None, init=None, random_state=None, max_features='sqrt', verbose=0, max_leaf_nodes=None, warm_start=False, presort='deprecated', 
validation_fraction=0.1, n_iter_no_change=None, tol=0.0001, ccp_alpha=0.0) gb.fit(X_train_num, y_train) y_gb_num = gb.predict(X_test_num) utils.metric_evaluation(gb, X_train_num, X_test_num, y_test, title = 'GB Tree Numbers Only') ###Output Confusion Matrix in array form [[5329 6] [ 59 1]] #################### ###Markdown SVM ###Code # svc = SVC( C=1.0, # kernel='rbf', # degree=3, # gamma='scale', # coef0=0.0, # shrinking=True, # probability=False, # tol=0.001, # cache_size=200, # class_weight='balanced', # verbose=False, # max_iter=-1, # decision_function_shape='ovr', # break_ties=False, # random_state=None) # svc.fit(X_train_num, y_train) # y_svc_num = svc.predict(X_test_num) # confusion_matrix(y_test, y_svc_num) #utils.metric_evaluation(svc, X_train_num, X_test_num, y_test) ###Output _____no_output_____ ###Markdown XGBoostadapted from here: https://www.kdnuggets.com/2017/03/simple-xgboost-tutorial-iris-dataset.html ###Code bst = xgb.XGBClassifier(n_estimators=100, colsample_bytree=0.9, eta=0.9, max_depth=6, num_boost_round=10, subsample=0.9, n_jobs=-1) bst.fit(X_train_num, y_train) y_xgb_num = bst.predict(X_test_num) utils.metric_evaluation(bst, X_train_num, X_test_num, y_test, title = 'XGBoost Numbers Only') ###Output Confusion Matrix in array form [[5324 11] [ 55 5]] #################### ###Markdown Putting all the plots together ###Code fig = plt.figure(figsize=(8,8)) ax = fig.gca() lr_roc = plot_roc_curve(lr, X_test_num, y_test, name='LogReg', ax=ax) nb_roc = plot_roc_curve(nb, X_test_num, y_test, name='NaiveBayes', ax=ax) rf_roc = plot_roc_curve(rf, X_test_num, y_test, name='RandomForest', ax=ax) gb_roc = plot_roc_curve(gb, X_test_num, y_test, name='GBTree', ax=ax) xgb_roc = plot_roc_curve(bst, X_test_num, y_test, name='XGBoost', ax=ax) plt.title("ROC Curve for Numerical Features Only") fig = plt.figure(figsize=(8,8)) ax = fig.gca() lr_prc = plot_precision_recall_curve(lr, X_test_num, y_test, name='LogReg', ax=ax) nb_prc = plot_precision_recall_curve(nb, 
X_test_num, y_test, name='NaiveBayes', ax=ax) rf_prc = plot_precision_recall_curve(rf, X_test_num, y_test, name='RandomForest', ax=ax) gb_prc = plot_precision_recall_curve(gb, X_test_num, y_test, name='GBTree', ax=ax) xgb_prc = plot_precision_recall_curve(bst, X_test_num, y_test, name='XGBoost', ax=ax) ax.legend(loc='upper right') plt.title("PRC Curve for Numerical Features") ###Output _____no_output_____ ###Markdown A: Numerical Features Alone Probably Not Enough If we use numerical data alone, performance ranges from inaccurate to abyssimal. We are unable to provide a solution that offers enough coverage of critical cases while also being efficient. In the subsequent notebook we explore using the text data for classifiers. Q: Are NLP Features alone enough? Logistic Regression ###Code #instantiate with default params lr_text = LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=1.0, fit_intercept=True, intercept_scaling=1, class_weight='balanced', random_state=None, solver='lbfgs', max_iter=100, multi_class='auto', verbose=0, warm_start=False, n_jobs=-1, l1_ratio=None) #fit to text only features lr_text.fit(X_train_text, y_train) utils.metric_evaluation(lr_text, X_train_text, X_test_text, y_test, title = 'Logistic Regression NLP Only') ###Output Confusion Matrix in array form [[4114 1221] [ 19 41]] #################### ###Markdown Naive Bayes ###Code nb_text = GaussianNB(priors=None, var_smoothing=1e-09) nb_text.fit(X_train_text.toarray(), y_train) y_nb_text = nb_text.predict(X_test_text.toarray()) utils.metric_evaluation(nb_text, X_train_text.toarray(), X_test_text.toarray(), y_test, title = 'Naive Bayes NLP Only') ###Output Confusion Matrix in array form [[3563 1772] [ 14 46]] #################### ###Markdown Random Forest ###Code rf_text = RandomForestClassifier(n_estimators=100, criterion='gini', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='sqrt', max_leaf_nodes=None, min_impurity_decrease=0.0, 
min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=-1, random_state=None, verbose=0, warm_start=False, class_weight='balanced', ccp_alpha=0.0, max_samples=None) rf_text.fit(X_train_text, y_train) y_rf_text = rf_text.predict(X_test_text) utils.metric_evaluation(rf_text, X_train_text.toarray(), X_test_text.toarray(), y_test, title = 'Random Forest NLP Only') ###Output Confusion Matrix in array form [[5248 87] [ 52 8]] #################### ###Markdown GB Tree ###Code gb_text = GradientBoostingClassifier(loss='deviance', learning_rate=0.1, n_estimators=100, subsample=1.0, criterion='friedman_mse', min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_depth=3, min_impurity_decrease=0.0, min_impurity_split=None, init=None, random_state=None, max_features='sqrt', verbose=0, max_leaf_nodes=None, warm_start=False, presort='deprecated', validation_fraction=0.1, n_iter_no_change=None, tol=0.0001, ccp_alpha=0.0) gb_text.fit(X_train_text, y_train) y_gb_text = gb_text.predict(X_test_text) utils.metric_evaluation(gb_text, X_train_text.toarray(), X_test_text.toarray(), y_test, title = 'GB Tree NLP Only') ###Output Confusion Matrix in array form [[5331 4] [ 60 0]] #################### ###Markdown XGB ###Code bst_text = xgb.XGBClassifier(n_estimators=100, colsample_bytree=0.9, eta=0.9, max_depth=6, num_boost_round=10, subsample=0.9, n_jobs=-1) bst_text.fit(X_train_text, y_train) utils.metric_evaluation(bst_text, X_train_text.toarray(), X_test_text.toarray(), y_test, title = 'XGB NLP Only') fig = plt.figure(figsize=(8,8)) ax = fig.gca() lr_roc = plot_roc_curve(lr_text, X_test_text, y_test, name='LogReg', ax=ax) nb_roc = plot_roc_curve(nb_text, X_test_text.toarray(), y_test, name='NaiveBayes', ax=ax) rf_roc = plot_roc_curve(rf_text, X_test_text, y_test, name='RandomForest', ax=ax) gb_roc = plot_roc_curve(gb_text, X_test_text, y_test, name='GBTree', ax=ax) xgb_roc = plot_roc_curve(bst_text, X_test_text, y_test, name='XGBoost', ax=ax) plt.title("ROC 
Curve for NLP Features Only") fig = plt.figure(figsize=(8,8)) ax = fig.gca() lr_prc = plot_precision_recall_curve(lr_text, X_test_text, y_test, name='LogReg', ax=ax) nb_prc = plot_precision_recall_curve(nb_text, X_test_text.toarray(), y_test, name='NaiveBayes', ax=ax) rf_prc = plot_precision_recall_curve(rf_text, X_test_text, y_test, name='RandomForest', ax=ax) gb_prc = plot_precision_recall_curve(gb_text, X_test_text, y_test, name='GBTree', ax=ax) xgb_prc = plot_precision_recall_curve(bst_text, X_test_text, y_test, name='XGBoost', ax=ax) ax.legend(loc='upper right') plt.title("PRC Curve for Numerical Features") ###Output _____no_output_____
notebooks/BLS model/BLS model - balance deflection.ipynb
###Markdown Bilayer Sonophore model: computation of balance quasi-static deflection Imports ###Code import numpy as np import matplotlib.pyplot as plt from PySONIC.core import BilayerSonophore ###Output _____no_output_____ ###Markdown Functions ###Code def plotZeq(bls, ng_range, Q_range, fs=15): fig, ax = plt.subplots(figsize=(15, 4)) ax.set_xlabel('$Q_m\ (nC/cm^2)$', fontsize=fs) ax.set_ylabel('$Z_{eq}\ (nm)$', fontsize=fs) for ng in ng_range: ZeqQS = np.array([bls.balancedefQS(ng, Q) for Q in Q_range]) ax.plot(Q_range * 1e5, ZeqQS * 1e9, label=f'ng = {(ng * 1e22):.2f}e-22 mole') for item in ax.get_xticklabels() + ax.get_yticklabels(): item.set_fontsize(fs) for key in ['top', 'right']: ax.spines[key].set_visible(False) ax.legend(fontsize=fs, loc='center right', bbox_to_anchor=(1.8, 0.5), frameon=False) fig.tight_layout() return fig ###Output _____no_output_____ ###Markdown Parameters ###Code a = 32e-9 # in-plane radius (m) Cm0 = 1e-2 Qm0 = -71.9e-5 bls = BilayerSonophore(a, Cm0, Qm0) charges = np.linspace(-80, 40, 200) * 1e-5 gas = np.linspace(0.5 * bls.ng0, 2.0 * bls.ng0, 5) ###Output _____no_output_____ ###Markdown Balance deflections ###Code fig = plotZeq(bls, gas, charges) ###Output _____no_output_____
d2l-en/tensorflow/chapter_optimization/convexity.ipynb
###Markdown Convexity:label:`sec_convexity`Convexity plays a vital role in the design of optimization algorithms. This is largely due to the fact that it is much easier to analyze and test algorithms in this context. In other words, if the algorithm performs poorly even in the convex setting we should not hope to see great results otherwise. Furthermore, even though the optimization problems in deep learning are generally nonconvex, they often exhibit some properties of convex ones near local minima. This can lead to exciting new optimization variants such as :cite:`Izmailov.Podoprikhin.Garipov.ea.2018`. BasicsLet us begin with the basics. SetsSets are the basis of convexity. Simply put, a set $X$ in a vector space is convex if for any $a, b \in X$ the line segment connecting $a$ and $b$ is also in $X$. In mathematical terms this means that for all $\lambda \in [0, 1]$ we have$$\lambda \cdot a + (1-\lambda) \cdot b \in X \text{ whenever } a, b \in X.$$This sounds a bit abstract. Consider the picture :numref:`fig_pacman`. The first set is not convex since there are line segments that are not contained in it. The other two sets suffer no such problem.![Three shapes, the left one is nonconvex, the others are convex](../img/pacman.svg):label:`fig_pacman`Definitions on their own are not particularly useful unless you can do something with them. In this case we can look at unions and intersections as shown in :numref:`fig_convex_intersect`. Assume that $X$ and $Y$ are convex sets. Then $X \cap Y$ is also convex. To see this, consider any $a, b \in X \cap Y$. Since $X$ and $Y$ are convex, the line segments connecting $a$ and $b$ are contained in both $X$ and $Y$. 
Given that, they also need to be contained in $X \cap Y$, thus proving our first theorem.![The intersection between two convex sets is convex](../img/convex-intersect.svg):label:`fig_convex_intersect`We can strengthen this result with little effort: given convex sets $X_i$, their intersection $\cap_{i} X_i$ is convex.To see that the converse is not true, consider two disjoint sets $X \cap Y = \emptyset$. Now pick $a \in X$ and $b \in Y$. The line segment in :numref:`fig_nonconvex` connecting $a$ and $b$ needs to contain some part that is neither in $X$ nor $Y$, since we assumed that $X \cap Y = \emptyset$. Hence the line segment is not in $X \cup Y$ either, thus proving that in general unions of convex sets need not be convex.![The union of two convex sets need not be convex](../img/nonconvex.svg):label:`fig_nonconvex`Typically the problems in deep learning are defined on convex domains. For instance $\mathbb{R}^d$ is a convex set (after all, the line between any two points in $\mathbb{R}^d$ remains in $\mathbb{R}^d$). In some cases we work with variables of bounded length, such as balls of radius $r$ as defined by $\{\mathbf{x} | \mathbf{x} \in \mathbb{R}^d \text{ and } \|\mathbf{x}\|_2 \leq r\}$. FunctionsNow that we have convex sets we can introduce convex functions $f$. Given a convex set $X$ a function defined on it $f: X \to \mathbb{R}$ is convex if for all $x, x' \in X$ and for all $\lambda \in [0, 1]$ we have$$\lambda f(x) + (1-\lambda) f(x') \geq f(\lambda x + (1-\lambda) x').$$To illustrate this let us plot a few functions and check which ones satisfy the requirement. We need to import a few libraries. ###Code %matplotlib inline from d2l import tensorflow as d2l import numpy as np from mpl_toolkits import mplot3d import tensorflow as tf ###Output _____no_output_____ ###Markdown Let us define a few functions, both convex and nonconvex. 
###Code f = lambda x: 0.5 * x**2 # Convex g = lambda x: tf.cos(np.pi * x) # Nonconvex h = lambda x: tf.exp(0.5 * x) # Convex x, segment = tf.range(-2, 2, 0.01), tf.constant([-1.5, 1]) d2l.use_svg_display() _, axes = d2l.plt.subplots(1, 3, figsize=(9, 3)) for ax, func in zip(axes, [f, g, h]): d2l.plot([x, segment], [func(x), func(segment)], axes=ax) ###Output _____no_output_____ ###Markdown As expected, the cosine function is nonconvex, whereas the parabola and the exponential function are. Note that the requirement that $X$ is a convex set is necessary for the condition to make sense. Otherwise the outcome of $f(\lambda x + (1-\lambda) x')$ might not be well defined. Convex functions have a number of desirable properties. Jensen's InequalityOne of the most useful tools is Jensen's inequality. It amounts to a generalization of the definition of convexity:$$\begin{aligned} \sum_i \alpha_i f(x_i) & \geq f\left(\sum_i \alpha_i x_i\right) \text{ and } E_x[f(x)] & \geq f\left(E_x[x]\right),\end{aligned}$$where $\alpha_i$ are nonnegative real numbers such that $\sum_i \alpha_i = 1$. In other words, the expectation of a convex function is larger than the convex function of an expectation. To prove the first inequality we repeatedly apply the definition of convexity to one term in the sum at a time. The expectation can be proven by taking the limit over finite segments.One of the common applications of Jensen's inequality is with regard to the log-likelihood of partially observed random variables. That is, we use$$E_{y \sim P(y)}[-\log P(x \mid y)] \geq -\log P(x).$$This follows since $\int P(y) P(x \mid y) dy = P(x)$.This is used in variational methods. Here $y$ is typically the unobserved random variable, $P(y)$ is the best guess of how it might be distributed and $P(x)$ is the distribution with $y$ integrated out. For instance, in clustering $y$ might be the cluster labels and $P(x \mid y)$ is the generative model when applying cluster labels. 
PropertiesConvex functions have a few useful properties. We describe them as follows. Local Minima is Global MinimaIn particular, the local minima for convex functions is also the global minima. Let us assume the contrary and prove it wrong. If $x^{\ast} \in X$ is a local minimum such that there is a small positive value $p$ so that for $x \in X$ that satisfies $0 < |x - x^{\ast}| \leq p$ there is $f(x^{\ast}) < f(x)$. Assume there exists $x' \in X$ for which $f(x') < f(x^{\ast})$. According to the property of convexity, $$\begin{aligned} f(\lambda x^{\ast} + (1-\lambda) x') &\leq \lambda f(x^{\ast}) + (1-\lambda) f(x') \\ &< \lambda f(x^{\ast}) + (1-\lambda) f(x^{\ast}) \\ &< f(x^{\ast}) \\\end{aligned}$$There exists $\lambda \in [0, 1)$, $\lambda = 1 - \frac{p}{|x^{\ast} - x'|}$ for an example, so that $0 < |\lambda x^{\ast} + (1-\lambda) x' - x^{\ast}| \leq p$. However, because $f(\lambda x^{\ast} + (1-\lambda) x') < f(x^{\ast})$, this violates our local minimum statement. Therefore, there does not exist $x' \in X$ for which $f(x') < f(x^{\ast})$. The local minimum $x^{\ast}$ is also the global minimum.For instance, the function $f(x) = (x-1)^2$ has a local minimum for $x=1$, it is also the global minimum. ###Code f = lambda x: (x-1)**2 d2l.set_figsize() d2l.plot([x, segment], [f(x), f(segment)], 'x', 'f(x)') ###Output _____no_output_____ ###Markdown The fact that the local minima for convex functions is also the global minima is very convenient. It means that if we minimize functions we cannot "get stuck". Note, though, that this does not mean that there cannot be more than one global minimum or that there might even exist one. For instance, the function $f(x) = \mathrm{max}(|x|-1, 0)$ attains its minimum value over the interval $[-1, 1]$. Conversely, the function $f(x) = \exp(x)$ does not attain a minimum value on $\mathbb{R}$. For $x \to -\infty$ it asymptotes to $0$, however there is no $x$ for which $f(x) = 0$. 
Convex Functions and SetsConvex functions define convex sets as *below-sets*. They are defined as$$S_b := \{x | x \in X \text{ and } f(x) \leq b\}.$$Such sets are convex. Let us prove this quickly. Remember that for any $x, x' \in S_b$ we need to show that $\lambda x + (1-\lambda) x' \in S_b$ as long as $\lambda \in [0, 1]$. But this follows directly from the definition of convexity since $f(\lambda x + (1-\lambda) x') \leq \lambda f(x) + (1-\lambda) f(x') \leq b$.Have a look at the function $f(x, y) = 0.5 x^2 + \cos(2 \pi y)$ below. It is clearly nonconvex. The level sets are correspondingly nonconvex. In fact, they are typically composed of disjoint sets. ###Code x, y = tf.meshgrid( tf.linspace(-1.0, 1.0, 101), tf.linspace(-1.0, 1.0, 101)) z = x**2 + 0.5 * tf.cos(2 * np.pi * y) # Plot the 3D surface d2l.set_figsize((6, 4)) ax = d2l.plt.figure().add_subplot(111, projection='3d') ax.plot_wireframe(x, y, z, **{'rstride': 10, 'cstride': 10}) ax.contour(x, y, z, offset=-1) ax.set_zlim(-1, 1.5) # Adjust labels for func in [d2l.plt.xticks, d2l.plt.yticks, ax.set_zticks]: func([-1, 0, 1]) ###Output _____no_output_____ ###Markdown Derivatives and ConvexityWhenever the second derivative of a function exists it is very easy to check for convexity. All we need to do is check whether $\partial_x^2 f(x) \succeq 0$, i.e., whether all of its eigenvalues are nonnegative. For instance, the function $f(\mathbf{x}) = \frac{1}{2} \|\mathbf{x}\|^2_2$ is convex since $\partial_{\mathbf{x}}^2 f = \mathbf{1}$, i.e., its derivative is the identity matrix.The first thing to realize is that we only need to prove this property for one-dimensional functions. After all, in general we can always define some function $g(z) = f(\mathbf{x} + z \cdot \mathbf{v})$. This function has the first and second derivatives $g' = (\partial_{\mathbf{x}} f)^\top \mathbf{v}$ and $g'' = \mathbf{v}^\top (\partial^2_{\mathbf{x}} f) \mathbf{v}$ respectively. 
In particular, $g'' \geq 0$ for all $\mathbf{v}$ whenever the Hessian of $f$ is positive semidefinite, i.e., whenever all of its eigenvalues are greater equal than zero. Hence back to the scalar case.To see that $f''(x) \geq 0$ for convex functions we use the fact that$$\frac{1}{2} f(x + \epsilon) + \frac{1}{2} f(x - \epsilon) \geq f\left(\frac{x + \epsilon}{2} + \frac{x - \epsilon}{2}\right) = f(x).$$Since the second derivative is given by the limit over finite differences it follows that$$f''(x) = \lim_{\epsilon \to 0} \frac{f(x+\epsilon) + f(x - \epsilon) - 2f(x)}{\epsilon^2} \geq 0.$$To see that the converse is true we use the fact that $f'' \geq 0$ implies that $f'$ is a monotonically increasing function. Let $a < x < b$ be three points in $\mathbb{R}$. We use the mean value theorem to express$$\begin{aligned}f(x) - f(a) & = (x-a) f'(\alpha) \text{ for some } \alpha \in [a, x] \text{ and } \\f(b) - f(x) & = (b-x) f'(\beta) \text{ for some } \beta \in [x, b].\end{aligned}$$By monotonicity $f'(\beta) \geq f'(\alpha)$, hence$$\begin{aligned} f(b) - f(a) & = f(b) - f(x) + f(x) - f(a) \\ & = (b-x) f'(\beta) + (x-a) f'(\alpha) \\ & \geq (b-a) f'(\alpha).\end{aligned}$$By geometry it follows that $f(x)$ is below the line connecting $f(a)$ and $f(b)$, thus proving convexity. We omit a more formal derivation in favor of a graph below. ###Code f = lambda x: 0.5 * x**2 x = tf.range(-2, 2, 0.01) axb, ab = tf.constant([-1.5, -0.5, 1]), tf.constant([-1.5, 1]) d2l.set_figsize() d2l.plot([x, axb, ab], [f(x) for x in [x, axb, ab]], 'x', 'f(x)') d2l.annotate('a', (-1.5, f(-1.5)), (-1.5, 1.5)) d2l.annotate('b', (1, f(1)), (1, 1.5)) d2l.annotate('x', (-0.5, f(-0.5)), (-1.5, f(-0.5))) ###Output _____no_output_____
mysite/static/ipython/4.MS-Face.ipynb
###Markdown **Face API**MicroSoft FACE api **1 MS API Key๊ฐ’ ์ถ”์ถœํ•˜๊ธฐ**https://azure.microsoft.com/en-gb/try/cognitive-services/my-apis/?apiSlug=face-api&country=Korea&allowContact=true&fromLogin=True ###Code key1 = "" key2 = "" ###Output _____no_output_____ ###Markdown **2 Response ๊ฐ์ฒด ๋ถ„์„ํ•ด๋ณด๊ธฐ****Python Document**https://docs.microsoft.com/en-gb/azure/cognitive-services/face/quickstarts/python ###Code response = [ { "faceId": "35102aa8-4263-4139-bfd6-185bb0f52d88", "faceRectangle": { "top": 208, "left": 228, "width": 91, "height": 91 }, "faceAttributes": { "smile": 1, "headPose": { "pitch": 0, "roll": 4.3, "yaw": -0.3 }, "gender": "female", "age": 27, "facialHair": { "moustache": 0, "beard": 0, "sideburns": 0 }, "glasses": "NoGlasses", "emotion": { "anger": 0, "contempt": 0, "disgust": 0, "fear": 0, "happiness": 1, "neutral": 0, "sadness": 0, "surprise": 0 }, "blur": { "blurLevel": "low", "value": 0 }, "exposure": { "exposureLevel": "goodExposure", "value": 0.65 }, "noise": { "noiseLevel": "low", "value": 0 }, "makeup": { "eyeMakeup": True, "lipMakeup": True }, "accessories": [], "occlusion": { "foreheadOccluded": False, "eyeOccluded": False, "mouthOccluded": False }, "hair": { "bald": 0.06, "invisible": False, "hairColor": [ { "color": "brown", "confidence": 1 }, { "color": "blond", "confidence": 0.5 }, { "color": "black", "confidence": 0.34 }, { "color": "red", "confidence": 0.32 }, { "color": "gray", "confidence": 0.14 }, { "color": "other", "confidence": 0.03 } ] } } }, { "faceId": "42502166-31bb-4ac8-81c0-a7adcb3b3e70", "faceRectangle": { "top": 109, "left": 125, "width": 79, "height": 79 }, "faceAttributes": { "smile": 1, "headPose": { "pitch": 0, "roll": 1.7, "yaw": 2.1 }, "gender": "male", "age": 32, "facialHair": { "moustache": 0.4, "beard": 0.4, "sideburns": 0.4 }, "glasses": "NoGlasses", "emotion": { "anger": 0, "contempt": 0, "disgust": 0, "fear": 0, "happiness": 1, "neutral": 0, "sadness": 0, "surprise": 0 }, "blur": { "blurLevel": 
"low", "value": 0.11 }, "exposure": { "exposureLevel": "goodExposure", "value": 0.74 }, "noise": { "noiseLevel": "low", "value": 0 }, "makeup": { "eyeMakeup": False, "lipMakeup": True }, "accessories": [], "occlusion": { "foreheadOccluded": False, "eyeOccluded": False, "mouthOccluded": False }, "hair": { "bald": 0.02, "invisible": False, "hairColor": [ { "color": "brown", "confidence": 1 }, { "color": "blond", "confidence": 0.94 }, { "color": "red", "confidence": 0.76 }, { "color": "gray", "confidence": 0.2 }, { "color": "other", "confidence": 0.03 }, { "color": "black", "confidence": 0.01 } ] } } } ] # ๊ฐ์ฒด์˜ ์ˆ˜ ๊ณ„์‚ฐํ•˜๊ธฐ len(response) # 1๋ฒˆ ๊ฐ์ฒด์˜ Key๊ฐ’ ์ถ”์ถœํ•˜๊ธฐ response[0].keys() face_key = list(response[0].keys())[2] print(face_key) response[0][face_key].keys() response[0][face_key]['emotion'] ###Output _____no_output_____ ###Markdown **3 Python ์˜ˆ์ œ ๋”ฐ๋ผํ•˜๊ธฐ****Python Document**https://docs.microsoft.com/en-gb/azure/cognitive-services/face/quickstarts/python ###Code # ๋ถ„์„์„ ์œ„ํ•œ ์ด๋ฏธ์ง€๋ฅผ ์ง€์ •ํ•œ๋‹ค image_url = 'https://how-old.net/Images/faces2/main007.jpg' # ์œ„์—์„œ ์ถ”์ถœํ•œ API๋ฅผ ์—ฌ๊ธฐ์—์„œ ์—ฐ๊ฒฐํ•œ๋‹ค subscription_key = "์‚ฌ์šฉ์ž key๊ฐ’์„ ์ž…๋ ฅํ•˜์„ธ์š”" assert subscription_key # ์•„๋ž˜์˜ API ์ž‘๋™์— ํ•„์š”ํ•œ ๋ชจ๋“ˆ์ด ์žˆ๋Š”์ง€๋ฅผ ๋จผ์ € ํ™•์ธํ•œ๋‹ค %matplotlib inline import requests import matplotlib.pyplot as plt from PIL import Image from matplotlib import patches from io import BytesIO # MS API๋ฅผ ์ถ”์ถœ face_api_url = 'https://westcentralus.api.cognitive.microsoft.com/face/v1.0/detect' headers = {'Ocp-Apim-Subscription-Key': subscription_key} params = { 'returnFaceId': 'true', 'returnFaceLandmarks': 'false', 'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,' + 'emotion,hair,makeup,occlusion,accessories,blur,exposure,noise' } data = {'url': image_url} response = requests.post(face_api_url, params=params, headers=headers, json=data) faces = response.json() image = 
Image.open(BytesIO(requests.get(image_url).content)) plt.figure(figsize=(8, 8)) ax = plt.imshow(image, alpha=0.6) for face in faces: fr = face["faceRectangle"] fa = face["faceAttributes"] origin = (fr["left"], fr["top"]) p = patches.Rectangle( origin, fr["width"], fr["height"], fill=False, linewidth=2, color='b') ax.axes.add_patch(p) plt.text(origin[0], origin[1], "%s, %d"%(fa["gender"].capitalize(), fa["age"]), fontsize=20, weight="bold", va="bottom") _ = plt.axis("off") ###Output _____no_output_____ ###Markdown **3 Emotion ์ž๋ฃŒ๋งŒ ์ถ”์ถœํ•˜๊ธฐ**์œ„์˜ Source ์—์„œ Matplotlib ๋ถ€๋ถ„ ์ œ๊ฑฐํ•˜๊ธฐhttps://docs.microsoft.com/en-gb/azure/cognitive-services/face/quickstarts/python ###Code # MS API๋ฅผ ์ถ”์ถœ face_api_url = 'https://westcentralus.api.cognitive.microsoft.com/face/v1.0/detect' headers = {'Ocp-Apim-Subscription-Key': subscription_key} params = { 'returnFaceId': 'true', 'returnFaceLandmarks': 'false', 'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,' + 'emotion,hair,makeup,occlusion,accessories,blur,exposure,noise' } data = {'url': image_url} response = requests.post(face_api_url, params=params, headers=headers, json=data) faces = response.json() image = Image.open(BytesIO(requests.get(image_url).content)) # plt.figure(figsize=(8, 8)) # ax = plt.imshow(image, alpha=0.6) emotion, people = [], [] for face in faces: fr = face["faceRectangle"] fa = face["faceAttributes"] origin = (fr["left"], fr["top"]) p = patches.Rectangle( origin, fr["width"], fr["height"], fill=False, linewidth=2, color='b') # Emotion API ๊ฒฐ๊ณผ๊ฐ’ ์ถ”์ถœํ•˜๊ธฐ people.append("[" + fa["gender"].capitalize() + ':' + str(int(fa["age"])) + "]") emotion.append(fa['emotion']) # ax.axes.add_patch(p) # plt.text(origin[0], origin[1], "%s, %d"%(fa["gender"].capitalize(), fa["age"]), # fontsize=20, weight="bold", va="bottom") # _ = plt.axis("off") people emotion # ๊ฐ์ •๋ถ„์„ ๊ฒฐ๊ณผ๋ฅผ text๋กœ ์ถœ๋ ฅ result_str = '' import pandas as pd # Pandas๋ฅผ ํ™œ์šฉํ•˜์—ฌ ๊ฒฐ๊ณผ๊ฐ’์„ 
๋‚ด๋ฆผ์ฐจ์ˆœ ์ •๋ ฌํ•œ๋‹ค for count, faceapi in enumerate(emotion): face_se = pd.Series(faceapi) face_se = face_se.sort_values(ascending=False) result_str += " " + people[count] + " " for no in range(len(face_se)): result_str += face_se.index[no] result_str += " : " + str(face_se[no]) + " " result_str ###Output _____no_output_____ ###Markdown **4 Emotion ํ•จ์ˆ˜ ๋งŒ๋“ค๊ธฐ**Django ์—์„œ ์‚ฌ์šฉํ•  ๊ฒฐ๊ณผ๊ฐ’ ์ถœ๋ ฅ ํ•จ์ˆ˜๋กœ ์ •๋ฆฌํ•˜๊ธฐhttps://docs.microsoft.com/en-gb/azure/cognitive-services/face/quickstarts/python ###Code # try : ์ž‘์—…์„ ์‹œ๋„ํ•œ๋‹ค # except : ์ž‘์—…์ค‘ ์˜ค๋ฅ˜๊ฐ€ ๋‚ฌ์„ ๋–„ ์ฒ˜๋ฆฌํ•  ๋‚ด์šฉ์„ ๊ธฐ๋กํ•œ๋‹ค for i in ['ํ•œ๊ตญ', '์ผ๋ณธ', '๋ฏธ๊ตญ']: try: i /= i except: print('+ ์—ฐ์‚ฐ์ด ๋˜์ง€ ์•Š๋Š” ๊ฐ์ฒด์ž…๋‹ˆ๋‹ค') i # MS-API๋ฅผ ์‚ฌ์šฉํ•˜์—ฌ ์ธ๋ฌผ์ •๋ณด๋ฅผ ์ถ”์ถœํ•œ๋‹ค def faceapi(image_url): try: import requests subscription_key = "key๊ฐ’์„ ์ž…๋ ฅํ•˜์„ธ์š”" assert subscription_key face_api_url = 'https://westcentralus.api.cognitive.microsoft.com/face/v1.0/detect' headers = {'Ocp-Apim-Subscription-Key': subscription_key} params = {'returnFaceId': 'true', 'returnFaceLandmarks': 'false', 'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,' + 'emotion,hair,makeup,occlusion,accessories,blur,exposure,noise'} data = {'url': image_url} response = requests.post(face_api_url, params=params, headers=headers, json=data) faces = response.json() emotion, people = [], [] for face in faces: fa = face["faceAttributes"] people.append("[" + fa["gender"].capitalize() + ':' + str(int(fa["age"])) + "]") emotion.append(fa['emotion']) # ๊ฐ์ •๋ถ„์„ ๊ฒฐ๊ณผ๋ฅผ text๋กœ ์ถœ๋ ฅ result_str = '' import pandas as pd for count, faceapi in enumerate(emotion): face_se = pd.Series(faceapi) face_se = face_se.sort_values(ascending=False) result_str += " " + people[count] + " " for no in range(len(face_se)): result_str += face_se.index[no] result_str += " : " + str(face_se[no]) + " " except: result_str = '์ด๋ฏธ์ง€๊ฐ€ ๋ถ„์„์— ์ ํ•ฉํ•˜์ง€ ์•Š์Šต๋‹ˆ๋‹ค.' 
return result_str ###Output _____no_output_____
week3/Regex for Text Processing (In-Class).ipynb
###Markdown Regex IntroductionUse the [regexr.com](regexr.com) to practice and hone your regular expressions before applying them in Python. ###Code import re # standard Python library for text regular expression parsing SAMPLE_TWEET = ''' #wolfram Alpha SUCKS! Even for researchers the information provided is less than you can get from #google or #wikipedia, totally useless!" ''' ###Output _____no_output_____ ###Markdown `re.match` searches starting that the beginning of the string, while `re.search` searches the entire string. Match the first time a capital letter appears in the tweet ###Code match = re.search("[a-z]", SAMPLE_TWEET) match.group() ###Output _____no_output_____ ###Markdown Match all capital letters that appears in the tweet ###Code re.findall("[A-Z]", SAMPLE_TWEET) ###Output _____no_output_____ ###Markdown Match all words that are at least 3 characters long ###Code re.findall("[a-zA-Z0-9]{3,}", SAMPLE_TWEET) ###Output _____no_output_____ ###Markdown Match all hashtags in the tweet ###Code re.findall("#[a-zA-Z0-9]+", SAMPLE_TWEET) ###Output _____no_output_____ ###Markdown Match all hashtags in the tweets, capture only the text of the hashtag ###Code # capturing groups re.findall("#([\w]+)", SAMPLE_TWEET) ###Output _____no_output_____ ###Markdown Match all words that start with `t`, and are followed by `h` or `o` ###Code re.findall("(?:th|to)\w*", SAMPLE_TWEET) ###Output _____no_output_____ ###Markdown Match all words that end a sentence ###Code re.findall("(\w+)(\.|\?|\!)", SAMPLE_TWEET) ###Output _____no_output_____ ###Markdown Match word boundary*A thorough examination of the movie shows Thor was a thorn in the side of the villains. Thor.*```pythonre.findall("\b[tT]hor\b", SAMPLE_TWEET)``` How to Handle When the Regex Does Not Match? ###Code SAMPLE_TWEET = "A thorough examination of the movie shows Thor was a thorn in the side of the villains. Thor." 
re.findall("\b[tT]hor\b", SAMPLE_TWEET) mylist = "ASdad" if re.findall("\\bThor\\b", SAMPLE_TWEET): print("Found") else: print("Not found") ###Output Found ###Markdown Using Regex Combined with Pandas ###Code import pandas as pd # load in dataframe # get rid of some columns we don't care about # preview the data # get length of tweets in characters # count number of times Obama appears in tweets # find all the @s in the tweets # Mon May 11 03:17:40 UTC 2009 # get the weekday of tweet # get the month of the tweet # get the year of the tweet ###Output _____no_output_____ ###Markdown Exercises (15 minutes)1. Identify the list of email addresses for your security administrator to blacklist from your company's email servers.2. Identify any IP addresses that should be blacklisted (an IPv4 address goes from **1.1.1.1 to 255.255.255.255**)3. Find a sensible way to identify all names of individuals in the spam emails.3. Find all hashtags mentioned in the tweets dataset. Store it as a separate column called **hashtags**. ###Code # 1 Identify the list of email addresses for your security administrator to blacklist from your company's email servers. # 2 Identify any IP addresses that should be blacklisted (an IPv4 address goes from **1.1.1.1 to 255.255.255.255**) # 3 Find a sensible way to identify all names of individuals in the spam emails. # 4 Find all hashtags mentioned in the tweets dataset. Store it as a separate column called **hashtags**. ###Output _____no_output_____
DataScience/3.Pandas/My_Practice.ipynb
###Markdown Pandas Series ###Code import numpy as np import pandas as pd labels = ['a', 'b', 'c'] my_data = [10, 20, 30] arr = np.array(my_data) d = {'a': 10, 'b': 20, 'c': 30} pd.Series(data = my_data) pd.Series(data = my_data, index=labels) pd.Series(my_data, labels) pd.Series(arr, labels) pd.Series(d) pd.Series(data=labels) pd.Series(data = [sum, print, len]) ser1 = pd.Series([1, 2, 3, 4], ['USA', 'GERMANY', 'JAPAN', 'INDIA']) ser1 ser2 = pd.Series([1, 2, 3, 4], ['USA', 'GERMANY', 'ITALY', 'INDIA']) ser2 ser1['USA'] ser1 + ser2 ###Output _____no_output_____ ###Markdown DataFrames A DataFrame is a bunch of series that shares common index ###Code from numpy.random import randn np.random.seed(101) df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z']) df df['W'] type(df['W']) type(df) df.W #Not recommended as we may get confused df[['W', 'Z']] df['new'] df['new'] = df['W'] + df['X'] df df.drop('new', axis = 1) df df.drop('new', axis = 1, inplace=True) df df.drop('E') df df.shape # Accessing the Row values df.loc['C'] #for location or labeled based index df.iloc[2] #for numerical based index df.loc['A', 'X'] df.loc[['A', 'C'],['Z', 'Y']] df > 0 booldf = df > 0 df[booldf] df[df > 0] df['W'] > 0 df[df['W'] > 0] df[df['Z'] < 0] resultantdf = df[df['W'] > 0] resultantdf resultantdf['Y'] df[df['W'] > 0][['Y', 'X']] boolSer = df['W'] > 0 boolSer result = df[boolSer] result mycols = ['Y', 'X'] result[mycols] df[(df['W']>0) and (df['X'] > 0)] # and operator cann't compare a series of boolean values to another boolean values. It can compare only a boolean to another boolean True and False df[(df['W'] > 0) & ( df['X'] > 0)] df[(df['W'] > 0) | ( df['Y'] > 1)] df df.reset_index() #It will reset the row indices to integers starting from 0 but it will not effect the DataFrame. If you want to effect DataFrame place - df.reset_index(inplace=True) df df['States'] = 'CA IN US CO RA'.split() df df.set_index('States') # To set one of the column as row indices. 
Note: you can use inplace='True' argument to overwrite the original. #Index levels outside = ['G1', 'G1', 'G1','G2', 'G2', 'G2'] inside = [1, 2, 3, 1, 2, 3] hier_index = list(zip(outside, inside)) hier_index = pd.MultiIndex.from_tuples(hier_index) hier_index df = pd.DataFrame(randn(6, 2), hier_index, ['A', 'B']) df df.loc['G1'] df.loc['G1'].loc[1] df.index.names df.index.names = ['Groups', 'Num'] df df.loc['G2'].loc[3]['B'] df.xs('G2') #It is cross-section df.xs(2, level='Num') #It is used to get the values of any level which is harder using loc method ###Output _____no_output_____ ###Markdown Missing Data ###Code dd = {'A': [1, 4, np.nan], 'B': [7, np.nan, np.nan], 'C': [1, 2, 3]} dff = pd.DataFrame(dd) dff dff.dropna() #It will drop rows having the null values.Note: you can use inplace='True' argument to overwrite the original. dff.dropna(axis=1) #It will drop columns having the null values.Note: you can use inplace='True' argument to overwrite the original. dff dff.dropna(thresh=2) # It will drop the rows having null values that are greater than or equal to thresh value dff.fillna('FILL VALUE') dff dff['A'].fillna(df['A'].mean()) dff ###Output _____no_output_____ ###Markdown Groupby - Groupby allows you to group together rows based off of a column and perform an aggregate function on them ###Code dataa = {'Company':['GOOG','GOOG','MSFT','MSFT','FB','FB'], 'Person':['Sam','Charlie','Amy','Vanessa','Carl','Sarah'], 'Sales':[200,120,340,124,243,350]} dfff = pd.DataFrame(dataa) dfff dfff.groupby('Company') byComp = dfff.groupby('Company') byComp byComp.mean() #As mean is the numeric expression It will ignore the non-numeric columns. 
byComp.sum() byComp.std() byComp.sum().loc['FB'] dfff.groupby('Company').sum().loc['FB'] dfff.groupby('Company').count() dfff.groupby('Company').min() dfff.groupby('Company').describe() dfff.groupby('Company').describe().transpose() dfff.groupby('Company').describe().transpose()['FB'] ###Output _____no_output_____ ###Markdown Merging, Joining, and Concatenating Concatenating ###Code df1 = pd.DataFrame({'A':['A0', 'A1', 'A2', 'A3'], 'B':['B0', 'B1', 'B2', 'B3'], 'C':['C0', 'C1', 'C2', 'C3'], 'D':['D0', 'D1', 'D2', 'D3']}, index=[0,1,2,3]) df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'], 'B': ['B4', 'B5', 'B6', 'B7'], 'C': ['C4', 'C5', 'C6', 'C7'], 'D': ['D4', 'D5', 'D6', 'D7']}, index=[4, 5, 6, 7]) df3 = pd.DataFrame({'A': ['A8', 'A9', 'A10', 'A11'], 'B': ['B8', 'B9', 'B10', 'B11'], 'C': ['C8', 'C9', 'C10', 'C11'], 'D': ['D8', 'D9', 'D10', 'D11']}, index=[8, 9, 10, 11]) df1 df2 df3 pd.concat([df1, df2, df3]) pd.concat([df1, df2, df3], axis=1) ###Output _____no_output_____ ###Markdown Merging ###Code left = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'], 'B': ['B0', 'B1', 'B2', 'B3'], 'key': ['K0', 'K1', 'K2', 'K3']}) right = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], 'C': ['C0', 'C1', 'C2', 'C3'], 'D': ['D0', 'D1', 'D2', 'D3']}) left right pd.merge(left, right, how='inner', on='key') left2 = pd.DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'], 'key2': ['K0', 'K1', 'K0', 'K1'], 'A': ['A0', 'A1', 'A2', 'A3'], 'B': ['B0', 'B1', 'B2', 'B3']}) right2 = pd.DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'], 'key2': ['K0', 'K0', 'K0', 'K0'], 'C': ['C0', 'C1', 'C2', 'C3'], 'D': ['D0', 'D1', 'D2', 'D3']}) pd.merge(left2, right2, on=['key1', 'key2']) pd.merge(left2, right2, how='outer', on=['key1', 'key2']) pd.merge(left2, right2, how='right', on=['key1', 'key2']) pd.merge(left2, right2, how='left', on=['key1', 'key2']) ###Output _____no_output_____ ###Markdown Joining ###Code left3 = pd.DataFrame({'A': ['A0', 'A1', 'A2'], 'B': ['B0', 'B1', 'B2']}, index=['K0', 'K1', 'K2']) right3 = 
pd.DataFrame({'C': ['C0', 'C2', 'C3'], 'D': ['D0', 'D2', 'D3']}, index=['K0', 'K2', 'K3']) left3.join(right3) left3.join(right3, how='outer') left3.join(right3, how='inner') left3.join(right3, how='right') import pandas as pd df4 = pd.DataFrame({'col1':[1,2,3,4],'col2':[444,555,666,444],'col3':['abc','def','ghi','xyz']}) df4.head() df4['col2'].unique() df4['col2'].nunique() df4['col2'].value_counts() df4[(df4['col2'] == 444) & (df4['col1']> 2)] def power2(x): return x ** 2 power2(4) df4['col1'].apply(power2) df4['col2'].apply(lambda x: x + 5) df4['col3'].apply(len) df4.columns df4.index df4.sort_values('col2') df4.sort_values(by = 'col2') df4.isnull() data3 = {'A':['foo','foo','foo','bar','bar','bar'], 'B':['one','one','two','two','one','one'], 'C':['x','y','x','y','x','y'], 'D':[1,3,2,5,4,1]} df5 = pd.DataFrame(data3) df5 df5.pivot_table(values='D',index=['A', 'B'],columns=['C']) ###Output _____no_output_____ ###Markdown Data Input and Output - sqlalchemy for SQL files- lxml for XML and HTML files- html5lib for HTML files- BeautifulSoup4 for HTML files- Pandas can read CSV files - CSV- Excel- HTML- SQL ###Code pwd pd.read_csv('example.csv') df6 = pd.read_csv('example.csv') df6 df6.to_csv('my_output') pd.read_csv('my_output') df6.to_csv('my_output', index=False) pd.read_csv('my_output') pd.read_excel('Excel_Sample.xlsx', sheet_name='Sheet1') df6.to_excel('Excel_Sample2.xlsx', sheet_name='NewSheet') df7 = pd.read_html('http://www.fdic.gov/bank/individual/failed/banklist.html') df7[0] df7[0].head() from sqlalchemy import create_engine engine = create_engine('sqlite:///:memory:') df6.to_sql('my_table', engine) sqldf = pd.read_sql('my_table', con=engine) sqldf ###Output _____no_output_____
Section-12-Engineering-Date-Time/12.02_Engineering_time.ipynb
###Markdown Engineering TimeIn this demo, we are going to extract different ways of representing time from a timestamp. We can extract for example:- hour- minute- second- data- elapsed timeWe will create a toy dataset for the demonstration. ###Code import pandas as pd import numpy as np import datetime # let's create a toy data set: 1 column 7 different timestamps, # 1 hr difference between timestamp date = pd.Series(pd.date_range('2015-1-5 11:20:00', periods=7, freq='H')) df = pd.DataFrame(dict(date=date)) df ###Output _____no_output_____ ###Markdown Extract the hr, minute and second ###Code df['hour'] = df['date'].dt.hour df['min'] = df['date'].dt.minute df['sec'] = df['date'].dt.second df ###Output _____no_output_____ ###Markdown Extract time part ###Code df['time'] = df['date'].dt.time df ###Output _____no_output_____ ###Markdown Extract hr, min, sec, at the same time ###Code # now let's repeat what we did in cell 3 in 1 command df[['h','m','s']] = pd.DataFrame([(x.hour, x.minute, x.second) for x in df['time']]) df ###Output _____no_output_____ ###Markdown Calculate time difference ###Code # let's create another toy dataframe with 2 timestamp columns # and 7 rows each, in the first column the timestamps change monthly, # in the second column the timestamps change weekly date1 = pd.Series(pd.date_range('2012-1-1 12:00:00', periods=7, freq='M')) date2 = pd.Series(pd.date_range('2013-3-11 21:45:00', periods=7, freq='W')) df = pd.DataFrame(dict(Start_date = date1, End_date = date2)) df # let's calculate the time elapsed in seconds df['diff_seconds'] = df['End_date'] - df['Start_date'] df['diff_seconds']=df['diff_seconds']/np.timedelta64(1,'s') df # let's calculate the time elapsed in minutes df['diff_seconds'] = df['End_date'] - df['Start_date'] df['diff_seconds']=df['diff_seconds']/np.timedelta64(1,'m') df ###Output _____no_output_____ ###Markdown For more details visit [this 
article](http://www.datasciencemadesimple.com/difference-two-timestamps-seconds-minutes-hours-pandas-python-2/) Work with different timezonesIn the next few cells, we will see how to work with timestamps that are in different time zones. ###Code # first, let's create a toy dataframe with some timestamps in different time zones df = pd.DataFrame() df['time'] = pd.concat([ pd.Series( pd.date_range( start='2014-08-01 09:00', freq='H', periods=3, tz='Europe/Berlin')), pd.Series( pd.date_range( start='2014-08-01 09:00', freq='H', periods=3, tz='US/Central')) ], axis=0) df ###Output _____no_output_____ ###Markdown We can see the different timezones indicated by the +2 and -5, respect to the meridian. ###Code # to work with different time zones, first we unify the timezone to the central one # setting utc = True df['time_utc'] = pd.to_datetime(df['time'], utc=True) # next we change all timestamps to the desired timezone, eg Europe/London # in this example df['time_london'] = df['time_utc'].dt.tz_convert('Europe/London') df ###Output _____no_output_____ ###Markdown Engineering TimeIn this demo, we are going to extract different ways of representing time from a timestamp. We can extract for example:- hour- minute- second- data- elapsed timeWe will create a toy dataset for the demonstration. 
###Code import pandas as pd import numpy as np import datetime # let's create a toy data set: 1 column 7 different timestamps, # 1 hr difference between timestamp date = pd.Series(pd.date_range('2015-1-5 11:20:00', periods=7, freq='H')) df = pd.DataFrame(dict(date=date)) df ###Output _____no_output_____ ###Markdown Extract the hr, minute and second ###Code df['hour'] = df['date'].dt.hour df['min'] = df['date'].dt.minute df['sec'] = df['date'].dt.second df ###Output _____no_output_____ ###Markdown Extract time part ###Code df['time'] = df['date'].dt.time df ###Output _____no_output_____ ###Markdown Extract hr, min, sec, at the same time ###Code # now let's repeat what we did in cell 3 in 1 command df[['h','m','s']] = pd.DataFrame([(x.hour, x.minute, x.second) for x in df['time']]) df ###Output _____no_output_____ ###Markdown Calculate time difference ###Code # let's create another toy dataframe with 2 timestamp columns # and 7 rows each, in the first column the timestamps change monthly, # in the second column the timestamps change weekly date1 = pd.Series(pd.date_range('2012-1-1 12:00:00', periods=7, freq='M')) date2 = pd.Series(pd.date_range('2013-3-11 21:45:00', periods=7, freq='W')) df = pd.DataFrame(dict(Start_date = date1, End_date = date2)) df # let's calculate the time elapsed in seconds df['diff_seconds'] = df['End_date'] - df['Start_date'] df['diff_seconds']=df['diff_seconds']/np.timedelta64(1,'s') df # let's calculate the time elapsed in minutes df['diff_seconds'] = df['End_date'] - df['Start_date'] df['diff_seconds']=df['diff_seconds']/np.timedelta64(1,'m') df ###Output _____no_output_____ ###Markdown For more details visit [this article](http://www.datasciencemadesimple.com/difference-two-timestamps-seconds-minutes-hours-pandas-python-2/) Work with different timezonesIn the next few cells, we will see how to work with timestamps that are in different time zones. 
###Code # first, let's create a toy dataframe with some timestamps in different time zones df = pd.DataFrame() df['time'] = pd.concat([ pd.Series( pd.date_range( start='2014-08-01 09:00', freq='H', periods=3, tz='Europe/Berlin')), pd.Series( pd.date_range( start='2014-08-01 09:00', freq='H', periods=3, tz='US/Central')) ], axis=0) df ###Output _____no_output_____ ###Markdown We can see the different timezones indicated by the +2 and -5, respect to the meridian. ###Code # to work with different time zones, first we unify the timezone to the central one # setting utc = True df['time_utc'] = pd.to_datetime(df['time'], utc=True) # next we change all timestamps to the desired timezone, eg Europe/London # in this example df['time_london'] = df['time_utc'].dt.tz_convert('Europe/London') df ###Output _____no_output_____
docs/nb/DM_Halos and DM_IGM.ipynb
###Markdown DM_Halos and DM_IGMSplitting $\langle DM_{cosmic}\rangle$ into its constituents. ###Code # imports from importlib import reload import numpy as np from scipy.interpolate import InterpolatedUnivariateSpline as IUS from astropy import units as u from frb.halos import ModifiedNFW from frb import halos as frb_halos from frb import igm as frb_igm from frb.figures import utils as ff_utils from matplotlib import pyplot as plt plt.rcParams['font.size'] = 17 ###Output _____no_output_____ ###Markdown $\langle \rho_{diffuse, cosmic}\rangle$Use `f_diffuse` to calculate the average mass fraction of diffuse gas and diffuse gas density (physical). Math described in [DM_cosmic.ipynb](DM_cosmic.ipynb). ###Code help(frb_igm.f_diffuse) # Define redshifts zvals = np.linspace(0, 8) # Get <n_e> f_diffuse, rho_diffuse = frb_igm.f_diffuse(zvals, return_rho = True) # Plot fig, axs = plt.subplots(2,1, sharex=True, figsize = (8,7)) fig.tight_layout() ax1 = axs[0] ax1.plot(zvals, f_diffuse, lw=2) ax1.set_ylabel(r'$\langle f_{diffuse, cosmic}\rangle$') ax2 = axs[1] ax2.plot(zvals, rho_diffuse.to('Msun*Mpc**-3'), lw=2) ax2.set_yscale("log") ax2.set_xlabel('z') ax2.set_ylabel(r'$\langle \rho_{diffuse, cosmic}\rangle$ $M_\odot~Mpc^{-3}$') plt.show() ###Output _____no_output_____ ###Markdown $\langle n_{e,cosmic}\rangle$ ###Code help(frb_igm.ne_cosmic) # Define redshifts zvals = np.linspace(0, 8) # Get <n_e> avg_ne = frb_igm.ne_cosmic(zvals) # Visualize fig = plt.figure(figsize = (10, 6)) plt.plot(zvals, avg_ne, label=r'$\langle n_{e, cosmic}\rangle$', lw=2) plt.yscale("log") plt.legend(loc = "upper left") plt.xlabel('z') plt.ylabel(r'$\langle n_{e, cosmic}\rangle$ [$cm^{-3}$]') plt.show() ###Output _____no_output_____ ###Markdown $\langle DM_{cosmic}\rangle$See [DM_cosmic.ipynb](DM_cosmic.ipynb) for details regarding its computation. 
###Code help(frb_igm.average_DM) DM_cosmic, zvals = frb_igm.average_DM(8, cumul=True) # Visualize fig = plt.figure(figsize = (10, 6)) plt.plot(zvals, DM_cosmic, lw=2) plt.xlabel('z') plt.ylabel(r'$\langle DM_{cosmic}\rangle$ $pc~cm^{-3}$') plt.show() ###Output _____no_output_____ ###Markdown $\langle DM_{halos}\rangle$ and $\langle DM_{IGM}\rangle$ The fraction of free electrons present in halos should be equal to the fraction of diffuse gas in halos assuming the ionization state of the individual species is only dependent on redshift (and not gas density as well). $$\begin{aligned}\frac{\langle n_{e, halos}\rangle}{\langle n_{e, cosmic}\rangle} & = \frac{\rho_{diffuse,halos}}{\rho_{diffuse,cosmic}}\\& = \frac{\rho_{b, halos}f_{hot}}{\rho_{b, cosmic}f_{diffuse, cosmic}}\\\end{aligned}$$Here $\rho_b$ refers to baryon density. $f_{hot}$ refers to the fraction of baryons in halos that is in the hot phase ($\sim10^7$ K). The remaining baryons are either in the neutral phase or in dense objects like stars. Assuming halos have the same baryon mass fraction as the universal average ($\Omega_b/\Omega_M$)$$\begin{aligned}\frac{\langle n_{e, halos}\rangle}{\langle n_{e, cosmic}\rangle} & = \frac{\rho_{m, halos}f_{hot}}{\rho_{m, cosmic}f_{diffuse, cosmic}}\\& = \frac{f_{halos} f_{hot}}{f_{diffuse, cosmic}}\\\end{aligned}$$$f_{halos}$ can be computed as a function of redshift by integrating the halo mass function (HMF) times mass over some mass range and dividing it by the density of matter in the universe. This allows us to compute a line of sight integral of $\langle n_{e, halos} \rangle$ to get $\langle DM_{halos}\rangle$. $\langle DM_{IGM}\rangle$ is just obtained by subtracting this from $\langle DM_{cosmic}\rangle$.Apart from $f_{hot}$ being an obvious free parameter, we also allow variation in the radial extent of halos. This is encoded in the parameter $r_{max}$ which is the radial extent of halos in units of $r_{200}$. 
Setting $r_{max}>1$ (for all halos; currently it is mass independent) smoothly extends the NFW profile and the modifid profile of the encased diffuse baryons. ###Code help(frb_igm.average_DMhalos) # evaluation frb_igm.average_DMhalos(0.1) # get cumulative DM_halos dm, zvals = frb_igm.average_DMhalos(0.1, cumul = True) dm zvals fhot_array = [0.2, 0.5, 0.75] rmax_array = [0.5, 1.0 , 2.0] # <DM_halos> for different f_hot fig, axs = plt.subplots(2,1, sharex=True, figsize = (8,7)) fig.tight_layout() ax1 = axs[0] for f_hot in fhot_array: DM_halos, zeval = frb_igm.average_DMhalos(3, f_hot = f_hot, cumul=True) ax1.plot(zeval, DM_halos, label="{:0.1f}".format(f_hot)) ax1.legend(title="f_hot") ax1.set_ylabel(r'$\langle DM_{halos}\rangle$ $pc~cm^{-3}$') # <DM_halos> for different rmax ax2 = axs[1] for rmax in rmax_array: DM_halos, zeval = frb_igm.average_DMhalos(3, rmax = rmax, cumul = True) ax2.plot(zeval, DM_halos, label="{:0.1f}".format(rmax)) ax2.legend(title="rmax") ax2.set_xlabel('z') ax2.set_ylabel(r'$\langle DM_{halos}\rangle$ $pc~cm^{-3}$') plt.show() # Limits of calculation frb_igm.average_DMhalos(3.1) # Failure above redshift 5 frb_igm.average_DMhalos(5.1) help(frb_igm.average_DMIGM) # Sanity check. <DM_cosmic> - (<DM_halos> + <DM_IGM) = 0 dm, zvals = frb_igm.average_DM(0.1, cumul= True) dm_halos, _ = frb_igm.average_DMhalos(0.1, cumul = True) dm_igm, _ = frb_igm.average_DMIGM(0.1, cumul = True) plt.plot(zvals, dm - dm_halos - dm_igm) plt.ylabel(r"DM $pc~cm^{-3}$") plt.xlabel("z") plt.show() ###Output _____no_output_____ ###Markdown DM_Halos and DM_IGMSplitting $\langle DM_{cosmic}\rangle$ into its constituents. 
###Code # imports from importlib import reload import numpy as np from scipy.interpolate import InterpolatedUnivariateSpline as IUS from astropy import units as u from frb.halos import ModifiedNFW from frb import halos as frb_halos from frb import igm as frb_igm from frb.figures import utils as ff_utils from matplotlib import pyplot as plt plt.rcParams['font.size'] = 17 ###Output _____no_output_____ ###Markdown $\langle \rho_{diffuse, cosmic}\rangle$Use `f_diffuse` to calculate the average mass fraction of diffuse gas and diffuse gas density (physical). Math described in [DM_cosmic.ipynb](DM_cosmic.ipynb). ###Code help(frb_igm.f_diffuse) # Define redshifts zvals = np.linspace(0, 8) # Get <n_e> f_diffuse, rho_diffuse = frb_igm.f_diffuse(zvals, return_rho = True) # Plot fig, axs = plt.subplots(2,1, sharex=True, figsize = (8,7)) fig.tight_layout() ax1 = axs[0] ax1.plot(zvals, f_diffuse, lw=2) ax1.set_ylabel(r'$\langle f_{diffuse, cosmic}\rangle$') ax2 = axs[1] ax2.plot(zvals, rho_diffuse.to('Msun*Mpc**-3'), lw=2) ax2.set_yscale("log") ax2.set_xlabel('z') ax2.set_ylabel(r'$\langle \rho_{diffuse, cosmic}\rangle$ $M_\odot~Mpc^{-3}$') plt.show() ###Output _____no_output_____ ###Markdown $\langle n_{e,cosmic}\rangle$ ###Code help(frb_igm.ne_cosmic) # Define redshifts zvals = np.linspace(0, 8) # Get <n_e> avg_ne = frb_igm.ne_cosmic(zvals) # Visualize fig = plt.figure(figsize = (10, 6)) plt.plot(zvals, avg_ne, label=r'$\langle n_{e, cosmic}\rangle$', lw=2) plt.yscale("log") plt.legend(loc = "upper left") plt.xlabel('z') plt.ylabel(r'$\langle n_{e, cosmic}\rangle$ [$cm^{-3}$]') plt.show() ###Output _____no_output_____ ###Markdown $\langle DM_{cosmic}\rangle$See [DM_cosmic.ipynb](DM_cosmic.ipynb) for details regarding its computation. 
###Code help(frb_igm.average_DM) DM_cosmic, zvals = frb_igm.average_DM(8, cumul=True) # Visualize fig = plt.figure(figsize = (10, 6)) plt.plot(zvals, DM_cosmic, lw=2) plt.xlabel('z') plt.ylabel(r'$\langle DM_{cosmic}\rangle$ $pc~cm^{-3}$') plt.show() ###Output _____no_output_____ ###Markdown $\langle DM_{halos}\rangle$ and $\langle DM_{IGM}\rangle$ The fraction of free electrons present in halos should be equal to the fraction of diffuse gas in halos assuming the ionization state of the individual species is only dependent on redshift (and not gas density as well). $$\begin{aligned}\frac{\langle n_{e, halos}\rangle}{\langle n_{e, cosmic}\rangle} & = \frac{\rho_{diffuse,halos}}{\rho_{diffuse,cosmic}}\\& = \frac{\rho_{b, halos}f_{hot}}{\rho_{b, cosmic}f_{diffuse, cosmic}}\\\end{aligned}$$Here $\rho_b$ refers to baryon density. $f_{hot}$ refers to the fraction of baryons in halos that is in the hot phase ($\sim10^7$ K). The remaining baryons are either in the neutral phase or in dense objects like stars. Assuming halos have the same baryon mass fraction as the universal average ($\Omega_b/\Omega_M$)$$\begin{aligned}\frac{\langle n_{e, halos}\rangle}{\langle n_{e, cosmic}\rangle} & = \frac{\rho_{m, halos}f_{hot}}{\rho_{m, cosmic}f_{diffuse, cosmic}}\\& = \frac{f_{halos} f_{hot}}{f_{diffuse, cosmic}}\\\end{aligned}$$$f_{halos}$ can be computed as a function of redshift by integrating the halo mass function (HMF) times mass over some mass range and dividing it by the density of matter in the universe. This allows us to compute a line of sight integral of $\langle n_{e, halos} \rangle$ to get $\langle DM_{halos}\rangle$. $\langle DM_{IGM}\rangle$ is just obtained by subtracting this from $\langle DM_{cosmic}\rangle$.Apart from $f_{hot}$ being an obvious free parameter, we also allow variation in the radial extent of halos. This is encoded in the parameter $r_{max}$ which is the radial extent of halos in units of $r_{200}$. 
Setting $r_{max}>1$ (for all halos; currently it is mass independent) smoothly extends the NFW profile and the modifid profile of the encased diffuse baryons. ###Code help(frb_igm.average_DMhalos) # evaluation frb_igm.average_DMhalos(0.1) # get cumulative DM_halos dm, zvals = frb_igm.average_DMhalos(0.1, cumul = True) dm zvals fhot_array = [0.2, 0.5, 0.75] rmax_array = [0.5, 1.0 , 2.0] # <DM_halos> for different f_hot fig, axs = plt.subplots(2,1, sharex=True, figsize = (8,7)) fig.tight_layout() ax1 = axs[0] for f_hot in fhot_array: DM_halos, zeval = frb_igm.average_DMhalos(3, f_hot = f_hot, cumul=True) ax1.plot(zeval, DM_halos, label="{:0.1f}".format(f_hot)) ax1.legend(title="f_hot") ax1.set_ylabel(r'$\langle DM_{halos}\rangle$ $pc~cm^{-3}$') # <DM_halos> for different rmax ax2 = axs[1] for rmax in rmax_array: DM_halos, zeval = frb_igm.average_DMhalos(3, rmax = rmax, cumul = True) ax2.plot(zeval, DM_halos, label="{:0.1f}".format(rmax)) ax2.legend(title="rmax") ax2.set_xlabel('z') ax2.set_ylabel(r'$\langle DM_{halos}\rangle$ $pc~cm^{-3}$') plt.show() # Limits of calculation frb_igm.average_DMhalos(3.1) # Failure above redshift 5 frb_igm.average_DMhalos(5.1) help(frb_igm.average_DMIGM) # Sanity check. <DM_cosmic> - (<DM_halos> + <DM_IGM) = 0 dm, zvals = frb_igm.average_DM(0.1, cumul= True) dm_halos, _ = frb_igm.average_DMhalos(0.1, cumul = True) dm_igm, _ = frb_igm.average_DMIGM(0.1, cumul = True) plt.plot(zvals, dm - dm_halos - dm_igm) plt.ylabel(r"DM $pc~cm^{-3}$") plt.xlabel("z") plt.show() ###Output _____no_output_____ ###Markdown DM_Halos and DM_IGMSplitting $\langle DM_{cosmic}\rangle$ into its constituents. 
###Code # imports from importlib import reload import numpy as np from scipy.interpolate import InterpolatedUnivariateSpline as IUS from astropy import units as u from frb.halos.models import ModifiedNFW from frb.halos import models as frb_halos from frb.halos import hmf as frb_hmf from frb.dm import igm as frb_igm from frb.figures import utils as ff_utils from matplotlib import pyplot as plt plt.rcParams['font.size'] = 17 ###Output _____no_output_____ ###Markdown $\langle \rho_{diffuse, cosmic}\rangle$Use `f_diffuse` to calculate the average mass fraction of diffuse gas and diffuse gas density (physical). Math described in [DM_cosmic.ipynb](DM_cosmic.ipynb). ###Code help(frb_igm.f_diffuse) # Define redshifts zvals = np.linspace(0, 8) # Get <n_e> f_diffuse, rho_diffuse = frb_igm.f_diffuse(zvals, return_rho = True) # Plot fig, axs = plt.subplots(2,1, sharex=True, figsize = (8,7)) fig.tight_layout() ax1 = axs[0] ax1.plot(zvals, f_diffuse, lw=2) ax1.set_ylabel(r'$\langle f_{diffuse, cosmic}\rangle$') ax2 = axs[1] ax2.plot(zvals, rho_diffuse.to('Msun*Mpc**-3'), lw=2) ax2.set_yscale("log") ax2.set_xlabel('z') ax2.set_ylabel(r'$\langle \rho_{diffuse, cosmic}\rangle$ $M_\odot~Mpc^{-3}$') plt.show() ###Output _____no_output_____ ###Markdown $\langle n_{e,cosmic}\rangle$ ###Code help(frb_igm.ne_cosmic) # Define redshifts zvals = np.linspace(0, 8) # Get <n_e> avg_ne = frb_igm.ne_cosmic(zvals) # Visualize fig = plt.figure(figsize = (10, 6)) plt.plot(zvals, avg_ne, label=r'$\langle n_{e, cosmic}\rangle$', lw=2) plt.yscale("log") plt.legend(loc = "upper left") plt.xlabel('z') plt.ylabel(r'$\langle n_{e, cosmic}\rangle$ [$cm^{-3}$]') plt.show() ###Output _____no_output_____ ###Markdown $\langle DM_{cosmic}\rangle$See [DM_cosmic.ipynb](DM_cosmic.ipynb) for details regarding its computation. 
###Code help(frb_igm.average_DM) DM_cosmic, zvals = frb_igm.average_DM(8, cumul=True) # Visualize fig = plt.figure(figsize = (10, 6)) plt.plot(zvals, DM_cosmic, lw=2) plt.xlabel('z') plt.ylabel(r'$\langle DM_{cosmic}\rangle$ $pc~cm^{-3}$') plt.show() ###Output _____no_output_____ ###Markdown $\langle DM_{halos}\rangle$ and $\langle DM_{IGM}\rangle$ The fraction of free electrons present in halos should be equal to the fraction of diffuse gas in halos assuming the ionization state of the individual species is only dependent on redshift (and not gas density as well). $$\begin{aligned}\frac{\langle n_{e, halos}\rangle}{\langle n_{e, cosmic}\rangle} & = \frac{\rho_{diffuse,halos}}{\rho_{diffuse,cosmic}}\\& = \frac{\rho_{b, halos}f_{hot}}{\rho_{b, cosmic}f_{diffuse, cosmic}}\\\end{aligned}$$Here $\rho_b$ refers to baryon density. $f_{hot}$ refers to the fraction of baryons in halos that is in the hot phase ($\sim10^7$ K). The remaining baryons are either in the neutral phase or in dense objects like stars. Assuming halos have the same baryon mass fraction as the universal average ($\Omega_b/\Omega_M$)$$\begin{aligned}\frac{\langle n_{e, halos}\rangle}{\langle n_{e, cosmic}\rangle} & = \frac{\rho_{m, halos}f_{hot}}{\rho_{m, cosmic}f_{diffuse, cosmic}}\\& = \frac{f_{halos} f_{hot}}{f_{diffuse, cosmic}}\\\end{aligned}$$$f_{halos}$ can be computed as a function of redshift by integrating the halo mass function (HMF) times mass over some mass range and dividing it by the density of matter in the universe. This allows us to compute a line of sight integral of $\langle n_{e, halos} \rangle$ to get $\langle DM_{halos}\rangle$. $\langle DM_{IGM}\rangle$ is just obtained by subtracting this from $\langle DM_{cosmic}\rangle$.Apart from $f_{hot}$ being an obvious free parameter, we also allow variation in the radial extent of halos. This is encoded in the parameter $r_{max}$ which is the radial extent of halos in units of $r_{200}$. 
Setting $r_{max}>1$ (for all halos; currently it is mass independent) smoothly extends the NFW profile and the modifid profile of the encased diffuse baryons. ###Code help(frb_igm.average_DMhalos) # evaluation frb_igm.average_DMhalos(0.1) # get cumulative DM_halos dm, zvals = frb_igm.average_DMhalos(0.1, cumul = True) dm zvals fhot_array = [0.2, 0.5, 0.75] rmax_array = [0.5, 1.0 , 2.0] # <DM_halos> for different f_hot fig, axs = plt.subplots(2,1, sharex=True, figsize = (8,7)) fig.tight_layout() ax1 = axs[0] for f_hot in fhot_array: DM_halos, zeval = frb_igm.average_DMhalos(3, f_hot = f_hot, cumul=True) ax1.plot(zeval, DM_halos, label="{:0.1f}".format(f_hot)) ax1.legend(title="f_hot") ax1.set_ylabel(r'$\langle DM_{halos}\rangle$ $pc~cm^{-3}$') # <DM_halos> for different rmax ax2 = axs[1] for rmax in rmax_array: DM_halos, zeval = frb_igm.average_DMhalos(3, rmax = rmax, cumul = True) ax2.plot(zeval, DM_halos, label="{:0.1f}".format(rmax)) ax2.legend(title="rmax") ax2.set_xlabel('z') ax2.set_ylabel(r'$\langle DM_{halos}\rangle$ $pc~cm^{-3}$') plt.show() # Limits of calculation frb_igm.average_DMhalos(3.1) # Failure above redshift 5 frb_igm.average_DMhalos(5.1) help(frb_igm.average_DMIGM) # Sanity check. <DM_cosmic> - (<DM_halos> + <DM_IGM) = 0 dm, zvals = frb_igm.average_DM(0.1, cumul= True) dm_halos, _ = frb_igm.average_DMhalos(0.1, cumul = True) dm_igm, _ = frb_igm.average_DMIGM(0.1, cumul = True) plt.plot(zvals, dm - dm_halos - dm_igm) plt.ylabel(r"DM $pc~cm^{-3}$") plt.xlabel("z") plt.show() ###Output _____no_output_____
00_qdrant.ipynb
###Markdown productioncddocker run -p 6333:6333 \ -v $(pwd)/qdrant_storage:/qdrant/storage \ --name qdrant_prod \ qdrant/qdrantdocker update --restart unless-stopped qdrant_prod devcddocker run -p 6334:6333 \ -v $(pwd)/qdrant_storage_dev:/qdrant/storage \ --name qdrant_dev \ qdrant/qdrantdocker update --restart unless-stopped qdrant_dev ###Code # !pip install --upgrade qdrant_client #export dim = 768 #+ onehot.n_dim() QdrantClient.get_collections = lambda self: [c['name'] for c in self.http.collections_api.get_collections().dict()['result']['collections']] QdrantClient.collection_len = lambda self, name: self.http.collections_api.get_collection(name).dict()['result']['vectors_count'] #export prod_client = QdrantClient(host='localhost', port=6333) dev_client = QdrantClient(host='localhost', port=6334) collection_name dev_client.recreate_collection(collection_name,dim,qdrant_client.http.models.Distance.DOT) prod_client.get_collections(),dev_client.get_collections() collection_name dev_client.collection_len(collection_name) !nbdev_build_lib ###Output Converted 00_clipmodel.ipynb. Converted 00_custom_pandas.ipynb. Converted 00_paths.ipynb. Converted 00_progress_check.ipynb. Converted 00_psql.ipynb. Converted 00_qdrant.ipynb. Converted 00_tools.ipynb. Converted 01_multiple_foods.ipynb. Converted 01_multiple_foods_segmantation.ipynb. Converted 01_search.ipynb. Converted 0_template copy 2.ipynb. Converted 0_template copy.ipynb. Converted 0_template.ipynb. Converted OFA.ipynb. Converted Untitled-1.ipynb. Converted bot pseudocode.ipynb. Converted bot_test.ipynb. Converted classifying_glovo_images.ipynb. Converted foodd dataset.ipynb. Converted foods_prompted_tosql.ipynb. Converted ideas.ipynb. Converted inference.ipynb. Converted multiple3105.ipynb. Converted multiple_3005.ipynb. 
No export destination, ignored: #export def search_image(url=None,head = 1): image_clip = requests.post(f'https://guru.skynet.center/image2vector/?url={url}').json() results = client.search(collection_name=collection_name,query_vector=image_clip,top=head) image_clip = torch.Tensor(image_clip) df = foods.loc[[r.id for r in results]].copy() df['score'] = [r.score for r in results] df = df.sort_values('score',ascending=False) return image_clip,df.reset_index() series2tensor = lambda series:torch.tensor([np.array(c) for c in series.values]) Warning: Exporting to "None.py" but this module is not part of this build Traceback (most recent call last): File "/home/dima/anaconda3/envs/food/bin/nbdev_build_lib", line 8, in <module> sys.exit(nbdev_build_lib()) File "/home/dima/anaconda3/envs/food/lib/python3.9/site-packages/fastcore/script.py", line 112, in _f tfunc(**merge(args, args_from_prog(func, xtra))) File "/home/dima/anaconda3/envs/food/lib/python3.9/site-packages/nbdev/export2html.py", line 465, in nbdev_build_lib notebook2script(fname=fname, bare=bare) File "/home/dima/anaconda3/envs/food/lib/python3.9/site-packages/nbdev/export.py", line 430, in notebook2script for f in sorted(files): d = _notebook2script(f, modules, silent=silent, to_dict=d, bare=bare) File "/home/dima/anaconda3/envs/food/lib/python3.9/site-packages/nbdev/export.py", line 357, in _notebook2script if to_dict is None: _add2all(fname_out, [f"'{f}'" for f in names if '.' not in f and len(f) > 0] + extra) File "/home/dima/anaconda3/envs/food/lib/python3.9/site-packages/nbdev/export.py", line 208, in _add2all with open(fname, 'r', encoding='utf8') as f: text = f.read() FileNotFoundError: [Errno 2] No such file or directory: '/home/dima/food/food/None.py'
NLP_Test.ipynb
###Markdown ###Code !pip install --upgrade gensim !pip install nlpia ## data wrangling import pandas as pd import numpy as np ## plotting import matplotlib.pyplot as plt import seaborn as sns sns.set_style("whitegrid") import altair as alt ## misc import json import datetime import warnings warnings.filterwarnings("ignore") import pickle from collections import Counter ## Deep Learning import keras ## pre-processing from sklearn.model_selection import train_test_split from keras.utils.np_utils import to_categorical ## models from keras import models from keras.models import Sequential ## layers from keras import layers from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D ## word embedding import gensim.downloader as api ## optimizer from keras.optimizers import Adam ## evaluating model from keras.callbacks import ReduceLROnPlateau from keras.wrappers.scikit_learn import KerasClassifier from keras.utils import plot_model ###Output Using TensorFlow backend. ###Markdown Load Data ###Code ## twitter data data_url = "https://raw.githubusercontent.com/papagorgio23/GamblingTwitter_Bot/master/Data/RNN_Data1.csv" data = pd.read_csv(data_url) data.head() plt.figure(figsize = (25, 10)) ax = sns.countplot(x="screen_name", data=data) plt.xticks( rotation=45, horizontalalignment='right', fontweight='light', fontsize='x-large' ); # Length of tweets data['Tweet_length'] = data['Text_Parsed_1'].str.len() plt.figure(figsize=(12.8,6)) sns.distplot(data['Tweet_length']).set_title('Tweet length distribution'); plt.figure(figsize=(25,6)) sns.boxplot(data=data, x='screen_name', y='Tweet_length', width=.5) plt.xticks( rotation=45, horizontalalignment='right', fontweight='light', fontsize='x-large' ); # @username, links, \r and \n data['Text_Parsed_1'] = data['text'].str.replace('@[^\s]+', " ") data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("http\S+", " ") data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("#", " hashtag ") data['Text_Parsed_1'] = 
data['Text_Parsed_1'].str.replace("%", " percent ") data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace(" - ", " ") data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("pts", " points ") data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("reb", " rebounds ") data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("w/", " with ") data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("w/o", " without ") data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("w/out", " without ") data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("o/u", " over under ") data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("-[0-9]", " favored by some points ") data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("\+[0-9]", " underdog by some points ") data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("\n", " ") data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace(" ", " ") data.head() print("Raw: ", data.loc[1]['text']) print() print() print("Cleaned: ", data.loc[1]['Text_Parsed_1']) ###Output Raw: looking at l assit 10 years of data. home teams on thursday night football have a 60 percent win percent . home favs win 74 percent and ats home favs are 57.4 percent . on sunday gms the l assit 10 years home teams have an overall win percent of 57 percent . home favs win 68 percent and home favs ats is only 48 percent . so anecdotally home teams have an adv on thursday night football v sun Cleaned: looking at l assit 10 years of data. home teams on thursday night football have a 60 percent win percent . home favs win 74 percent and ats home favs are 57.4 percent . on sunday gms the l assit 10 years home teams have an overall win percent of 57 percent . home favs win 68 percent and home favs ats is only 48 percent . 
so anecdotally home teams have an adv on thursday night football v sun ###Markdown Load Pre-Trained Word Embeddings ###Code info = api.info() print(json.dumps(info, indent=4)) ###Output { "corpora": { "semeval-2016-2017-task3-subtaskBC": { "num_records": -1, "record_format": "dict", "file_size": 6344358, "reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/semeval-2016-2017-task3-subtaskB-eng/__init__.py", "license": "All files released for the task are free for general research use", "fields": { "2016-train": [ "..." ], "2016-dev": [ "..." ], "2017-test": [ "..." ], "2016-test": [ "..." ] }, "description": "SemEval 2016 / 2017 Task 3 Subtask B and C datasets contain train+development (317 original questions, 3,169 related questions, and 31,690 comments), and test datasets in English. The description of the tasks and the collected data is given in sections 3 and 4.1 of the task paper http://alt.qcri.org/semeval2016/task3/data/uploads/semeval2016-task3-report.pdf linked in section \u201cPapers\u201d of https://github.com/RaRe-Technologies/gensim-data/issues/18.", "checksum": "701ea67acd82e75f95e1d8e62fb0ad29", "file_name": "semeval-2016-2017-task3-subtaskBC.gz", "read_more": [ "http://alt.qcri.org/semeval2017/task3/", "http://alt.qcri.org/semeval2017/task3/data/uploads/semeval2017-task3.pdf", "https://github.com/RaRe-Technologies/gensim-data/issues/18", "https://github.com/Witiko/semeval-2016_2017-task3-subtaskB-english" ], "parts": 1 }, "semeval-2016-2017-task3-subtaskA-unannotated": { "num_records": 189941, "record_format": "dict", "file_size": 234373151, "reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/semeval-2016-2017-task3-subtaskA-unannotated-eng/__init__.py", "license": "These datasets are free for general research use.", "fields": { "THREAD_SEQUENCE": "", "RelQuestion": { "RELQ_CATEGORY": "question category, according to the Qatar Living taxonomy", "RELQ_DATE": "date of posting", "RELQ_ID": 
"question indentifier", "RELQ_USERID": "identifier of the user asking the question", "RELQ_USERNAME": "name of the user asking the question", "RelQBody": "body of question", "RelQSubject": "subject of question" }, "RelComments": [ { "RelCText": "text of answer", "RELC_USERID": "identifier of the user posting the comment", "RELC_ID": "comment identifier", "RELC_USERNAME": "name of the user posting the comment", "RELC_DATE": "date of posting" } ] }, "description": "SemEval 2016 / 2017 Task 3 Subtask A unannotated dataset contains 189,941 questions and 1,894,456 comments in English collected from the Community Question Answering (CQA) web forum of Qatar Living. These can be used as a corpus for language modelling.", "checksum": "2de0e2f2c4f91c66ae4fcf58d50ba816", "file_name": "semeval-2016-2017-task3-subtaskA-unannotated.gz", "read_more": [ "http://alt.qcri.org/semeval2016/task3/", "http://alt.qcri.org/semeval2016/task3/data/uploads/semeval2016-task3-report.pdf", "https://github.com/RaRe-Technologies/gensim-data/issues/18", "https://github.com/Witiko/semeval-2016_2017-task3-subtaskA-unannotated-english" ], "parts": 1 }, "patent-2017": { "num_records": 353197, "record_format": "dict", "file_size": 3087262469, "reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/patent-2017/__init__.py", "license": "not found", "description": "Patent Grant Full Text. 
Contains the full text including tables, sequence data and 'in-line' mathematical expressions of each patent grant issued in 2017.", "checksum-0": "818501f0b9af62d3b88294d86d509f8f", "checksum-1": "66c05635c1d3c7a19b4a335829d09ffa", "file_name": "patent-2017.gz", "read_more": [ "http://patents.reedtech.com/pgrbft.php" ], "parts": 2 }, "quora-duplicate-questions": { "num_records": 404290, "record_format": "dict", "file_size": 21684784, "reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/quora-duplicate-questions/__init__.py", "license": "probably https://www.quora.com/about/tos", "fields": { "question1": "the full text of each question", "question2": "the full text of each question", "qid1": "unique ids of each question", "qid2": "unique ids of each question", "id": "the id of a training set question pair", "is_duplicate": "the target variable, set to 1 if question1 and question2 have essentially the same meaning, and 0 otherwise" }, "description": "Over 400,000 lines of potential question duplicate pairs. Each line contains IDs for each question in the pair, the full text for each question, and a binary value that indicates whether the line contains a duplicate pair or not.", "checksum": "d7cfa7fbc6e2ec71ab74c495586c6365", "file_name": "quora-duplicate-questions.gz", "read_more": [ "https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs" ], "parts": 1 }, "wiki-english-20171001": { "num_records": 4924894, "record_format": "dict", "file_size": 6516051717, "reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/wiki-english-20171001/__init__.py", "license": "https://dumps.wikimedia.org/legal.html", "fields": { "section_texts": "list of body of sections", "section_titles": "list of titles of sections", "title": "Title of wiki article" }, "description": "Extracted Wikipedia dump from October 2017. 
Produced by `python -m gensim.scripts.segment_wiki -f enwiki-20171001-pages-articles.xml.bz2 -o wiki-en.gz`", "checksum-0": "a7d7d7fd41ea7e2d7fa32ec1bb640d71", "checksum-1": "b2683e3356ffbca3b6c2dca6e9801f9f", "checksum-2": "c5cde2a9ae77b3c4ebce804f6df542c2", "checksum-3": "00b71144ed5e3aeeb885de84f7452b81", "file_name": "wiki-english-20171001.gz", "read_more": [ "https://dumps.wikimedia.org/enwiki/20171001/" ], "parts": 4 }, "text8": { "num_records": 1701, "record_format": "list of str (tokens)", "file_size": 33182058, "reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/text8/__init__.py", "license": "not found", "description": "First 100,000,000 bytes of plain text from Wikipedia. Used for testing purposes; see wiki-english-* for proper full Wikipedia datasets.", "checksum": "68799af40b6bda07dfa47a32612e5364", "file_name": "text8.gz", "read_more": [ "http://mattmahoney.net/dc/textdata.html" ], "parts": 1 }, "fake-news": { "num_records": 12999, "record_format": "dict", "file_size": 20102776, "reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/fake-news/__init__.py", "license": "https://creativecommons.org/publicdomain/zero/1.0/", "fields": { "crawled": "date the story was archived", "ord_in_thread": "", "published": "date published", "participants_count": "number of participants", "shares": "number of Facebook shares", "replies_count": "number of replies", "main_img_url": "image from story", "spam_score": "data from webhose.io", "uuid": "unique identifier", "language": "data from webhose.io", "title": "title of story", "country": "data from webhose.io", "domain_rank": "data from webhose.io", "author": "author of story", "comments": "number of Facebook comments", "site_url": "site URL from BS detector", "text": "text of story", "thread_title": "", "type": "type of website (label from BS detector)", "likes": "number of Facebook likes" }, "description": "News dataset, contains text and metadata from 244 
websites and represents 12,999 posts in total from a specific window of 30 days. The data was pulled using the webhose.io API, and because it's coming from their crawler, not all websites identified by their BS Detector are present in this dataset. Data sources that were missing a label were simply assigned a label of 'bs'. There are (ostensibly) no genuine, reliable, or trustworthy news sources represented in this dataset (so far), so don't trust anything you read.", "checksum": "5e64e942df13219465927f92dcefd5fe", "file_name": "fake-news.gz", "read_more": [ "https://www.kaggle.com/mrisdal/fake-news" ], "parts": 1 }, "20-newsgroups": { "num_records": 18846, "record_format": "dict", "file_size": 14483581, "reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/20-newsgroups/__init__.py", "license": "not found", "fields": { "topic": "name of topic (20 variant of possible values)", "set": "marker of original split (possible values 'train' and 'test')", "data": "", "id": "original id inferred from folder name" }, "description": "The notorious collection of approximately 20,000 newsgroup posts, partitioned (nearly) evenly across 20 different newsgroups.", "checksum": "c92fd4f6640a86d5ba89eaad818a9891", "file_name": "20-newsgroups.gz", "read_more": [ "http://qwone.com/~jason/20Newsgroups/" ], "parts": 1 }, "__testing_matrix-synopsis": { "description": "[THIS IS ONLY FOR TESTING] Synopsis of the movie matrix.", "checksum": "1767ac93a089b43899d54944b07d9dc5", "file_name": "__testing_matrix-synopsis.gz", "read_more": [ "http://www.imdb.com/title/tt0133093/plotsummary?ref_=ttpl_pl_syn#synopsis" ], "parts": 1 }, "__testing_multipart-matrix-synopsis": { "description": "[THIS IS ONLY FOR TESTING] Synopsis of the movie matrix.", "checksum-0": "c8b0c7d8cf562b1b632c262a173ac338", "checksum-1": "5ff7fc6818e9a5d9bc1cf12c35ed8b96", "checksum-2": "966db9d274d125beaac7987202076cba", "file_name": "__testing_multipart-matrix-synopsis.gz", "read_more": [ 
"http://www.imdb.com/title/tt0133093/plotsummary?ref_=ttpl_pl_syn#synopsis" ], "parts": 3 } }, "models": { "fasttext-wiki-news-subwords-300": { "num_records": 999999, "file_size": 1005007116, "base_dataset": "Wikipedia 2017, UMBC webbase corpus and statmt.org news dataset (16B tokens)", "reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/fasttext-wiki-news-subwords-300/__init__.py", "license": "https://creativecommons.org/licenses/by-sa/3.0/", "parameters": { "dimension": 300 }, "description": "1 million word vectors trained on Wikipedia 2017, UMBC webbase corpus and statmt.org news dataset (16B tokens).", "read_more": [ "https://fasttext.cc/docs/en/english-vectors.html", "https://arxiv.org/abs/1712.09405", "https://arxiv.org/abs/1607.01759" ], "checksum": "de2bb3a20c46ce65c9c131e1ad9a77af", "file_name": "fasttext-wiki-news-subwords-300.gz", "parts": 1 }, "conceptnet-numberbatch-17-06-300": { "num_records": 1917247, "file_size": 1225497562, "base_dataset": "ConceptNet, word2vec, GloVe, and OpenSubtitles 2016", "reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/conceptnet-numberbatch-17-06-300/__init__.py", "license": "https://github.com/commonsense/conceptnet-numberbatch/blob/master/LICENSE.txt", "parameters": { "dimension": 300 }, "description": "ConceptNet Numberbatch consists of state-of-the-art semantic vectors (also known as word embeddings) that can be used directly as a representation of word meanings or as a starting point for further machine learning. ConceptNet Numberbatch is part of the ConceptNet open data project. ConceptNet provides lots of ways to compute with word meanings, one of which is word embeddings. ConceptNet Numberbatch is a snapshot of just the word embeddings. 
It is built using an ensemble that combines data from ConceptNet, word2vec, GloVe, and OpenSubtitles 2016, using a variation on retrofitting.", "read_more": [ "http://aaai.org/ocs/index.php/AAAI/AAAI17/paper/view/14972", "https://github.com/commonsense/conceptnet-numberbatch", "http://conceptnet.io/" ], "checksum": "fd642d457adcd0ea94da0cd21b150847", "file_name": "conceptnet-numberbatch-17-06-300.gz", "parts": 1 }, "word2vec-ruscorpora-300": { "num_records": 184973, "file_size": 208427381, "base_dataset": "Russian National Corpus (about 250M words)", "reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/word2vec-ruscorpora-300/__init__.py", "license": "https://creativecommons.org/licenses/by/4.0/deed.en", "parameters": { "dimension": 300, "window_size": 10 }, "description": "Word2vec Continuous Skipgram vectors trained on full Russian National Corpus (about 250M words). The model contains 185K words.", "preprocessing": "The corpus was lemmatized and tagged with Universal PoS", "read_more": [ "https://www.academia.edu/24306935/WebVectors_a_Toolkit_for_Building_Web_Interfaces_for_Vector_Semantic_Models", "http://rusvectores.org/en/", "https://github.com/RaRe-Technologies/gensim-data/issues/3" ], "checksum": "9bdebdc8ae6d17d20839dd9b5af10bc4", "file_name": "word2vec-ruscorpora-300.gz", "parts": 1 }, "word2vec-google-news-300": { "num_records": 3000000, "file_size": 1743563840, "base_dataset": "Google News (about 100 billion words)", "reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/word2vec-google-news-300/__init__.py", "license": "not found", "parameters": { "dimension": 300 }, "description": "Pre-trained vectors trained on a part of the Google News dataset (about 100 billion words). The model contains 300-dimensional vectors for 3 million words and phrases. 
The phrases were obtained using a simple data-driven approach described in 'Distributed Representations of Words and Phrases and their Compositionality' (https://code.google.com/archive/p/word2vec/).", "read_more": [ "https://code.google.com/archive/p/word2vec/", "https://arxiv.org/abs/1301.3781", "https://arxiv.org/abs/1310.4546", "https://www.microsoft.com/en-us/research/publication/linguistic-regularities-in-continuous-space-word-representations/?from=http%3A%2F%2Fresearch.microsoft.com%2Fpubs%2F189726%2Frvecs.pdf" ], "checksum": "a5e5354d40acb95f9ec66d5977d140ef", "file_name": "word2vec-google-news-300.gz", "parts": 1 }, "glove-wiki-gigaword-50": { "num_records": 400000, "file_size": 69182535, "base_dataset": "Wikipedia 2014 + Gigaword 5 (6B tokens, uncased)", "reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/glove-wiki-gigaword-50/__init__.py", "license": "http://opendatacommons.org/licenses/pddl/", "parameters": { "dimension": 50 }, "description": "Pre-trained vectors based on Wikipedia 2014 + Gigaword, 5.6B tokens, 400K vocab, uncased (https://nlp.stanford.edu/projects/glove/).", "preprocessing": "Converted to w2v format with `python -m gensim.scripts.glove2word2vec -i <fname> -o glove-wiki-gigaword-50.txt`.", "read_more": [ "https://nlp.stanford.edu/projects/glove/", "https://nlp.stanford.edu/pubs/glove.pdf" ], "checksum": "c289bc5d7f2f02c6dc9f2f9b67641813", "file_name": "glove-wiki-gigaword-50.gz", "parts": 1 }, "glove-wiki-gigaword-100": { "num_records": 400000, "file_size": 134300434, "base_dataset": "Wikipedia 2014 + Gigaword 5 (6B tokens, uncased)", "reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/glove-wiki-gigaword-100/__init__.py", "license": "http://opendatacommons.org/licenses/pddl/", "parameters": { "dimension": 100 }, "description": "Pre-trained vectors based on Wikipedia 2014 + Gigaword 5.6B tokens, 400K vocab, uncased (https://nlp.stanford.edu/projects/glove/).", 
"preprocessing": "Converted to w2v format with `python -m gensim.scripts.glove2word2vec -i <fname> -o glove-wiki-gigaword-100.txt`.", "read_more": [ "https://nlp.stanford.edu/projects/glove/", "https://nlp.stanford.edu/pubs/glove.pdf" ], "checksum": "40ec481866001177b8cd4cb0df92924f", "file_name": "glove-wiki-gigaword-100.gz", "parts": 1 }, "glove-wiki-gigaword-200": { "num_records": 400000, "file_size": 264336934, "base_dataset": "Wikipedia 2014 + Gigaword 5 (6B tokens, uncased)", "reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/glove-wiki-gigaword-200/__init__.py", "license": "http://opendatacommons.org/licenses/pddl/", "parameters": { "dimension": 200 }, "description": "Pre-trained vectors based on Wikipedia 2014 + Gigaword, 5.6B tokens, 400K vocab, uncased (https://nlp.stanford.edu/projects/glove/).", "preprocessing": "Converted to w2v format with `python -m gensim.scripts.glove2word2vec -i <fname> -o glove-wiki-gigaword-200.txt`.", "read_more": [ "https://nlp.stanford.edu/projects/glove/", "https://nlp.stanford.edu/pubs/glove.pdf" ], "checksum": "59652db361b7a87ee73834a6c391dfc1", "file_name": "glove-wiki-gigaword-200.gz", "parts": 1 }, "glove-wiki-gigaword-300": { "num_records": 400000, "file_size": 394362229, "base_dataset": "Wikipedia 2014 + Gigaword 5 (6B tokens, uncased)", "reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/glove-wiki-gigaword-300/__init__.py", "license": "http://opendatacommons.org/licenses/pddl/", "parameters": { "dimension": 300 }, "description": "Pre-trained vectors based on Wikipedia 2014 + Gigaword, 5.6B tokens, 400K vocab, uncased (https://nlp.stanford.edu/projects/glove/).", "preprocessing": "Converted to w2v format with `python -m gensim.scripts.glove2word2vec -i <fname> -o glove-wiki-gigaword-300.txt`.", "read_more": [ "https://nlp.stanford.edu/projects/glove/", "https://nlp.stanford.edu/pubs/glove.pdf" ], "checksum": "29e9329ac2241937d55b852e8284e89b", 
"file_name": "glove-wiki-gigaword-300.gz", "parts": 1 }, "glove-twitter-25": { "num_records": 1193514, "file_size": 109885004, "base_dataset": "Twitter (2B tweets, 27B tokens, 1.2M vocab, uncased)", "reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/glove-twitter-25/__init__.py", "license": "http://opendatacommons.org/licenses/pddl/", "parameters": { "dimension": 25 }, "description": "Pre-trained vectors based on 2B tweets, 27B tokens, 1.2M vocab, uncased (https://nlp.stanford.edu/projects/glove/).", "preprocessing": "Converted to w2v format with `python -m gensim.scripts.glove2word2vec -i <fname> -o glove-twitter-25.txt`.", "read_more": [ "https://nlp.stanford.edu/projects/glove/", "https://nlp.stanford.edu/pubs/glove.pdf" ], "checksum": "50db0211d7e7a2dcd362c6b774762793", "file_name": "glove-twitter-25.gz", "parts": 1 }, "glove-twitter-50": { "num_records": 1193514, "file_size": 209216938, "base_dataset": "Twitter (2B tweets, 27B tokens, 1.2M vocab, uncased)", "reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/glove-twitter-50/__init__.py", "license": "http://opendatacommons.org/licenses/pddl/", "parameters": { "dimension": 50 }, "description": "Pre-trained vectors based on 2B tweets, 27B tokens, 1.2M vocab, uncased (https://nlp.stanford.edu/projects/glove/)", "preprocessing": "Converted to w2v format with `python -m gensim.scripts.glove2word2vec -i <fname> -o glove-twitter-50.txt`.", "read_more": [ "https://nlp.stanford.edu/projects/glove/", "https://nlp.stanford.edu/pubs/glove.pdf" ], "checksum": "c168f18641f8c8a00fe30984c4799b2b", "file_name": "glove-twitter-50.gz", "parts": 1 }, "glove-twitter-100": { "num_records": 1193514, "file_size": 405932991, "base_dataset": "Twitter (2B tweets, 27B tokens, 1.2M vocab, uncased)", "reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/glove-twitter-100/__init__.py", "license": "http://opendatacommons.org/licenses/pddl/", 
"parameters": { "dimension": 100 }, "description": "Pre-trained vectors based on 2B tweets, 27B tokens, 1.2M vocab, uncased (https://nlp.stanford.edu/projects/glove/)", "preprocessing": "Converted to w2v format with `python -m gensim.scripts.glove2word2vec -i <fname> -o glove-twitter-100.txt`.", "read_more": [ "https://nlp.stanford.edu/projects/glove/", "https://nlp.stanford.edu/pubs/glove.pdf" ], "checksum": "b04f7bed38756d64cf55b58ce7e97b15", "file_name": "glove-twitter-100.gz", "parts": 1 }, "glove-twitter-200": { "num_records": 1193514, "file_size": 795373100, "base_dataset": "Twitter (2B tweets, 27B tokens, 1.2M vocab, uncased)", "reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/glove-twitter-200/__init__.py", "license": "http://opendatacommons.org/licenses/pddl/", "parameters": { "dimension": 200 }, "description": "Pre-trained vectors based on 2B tweets, 27B tokens, 1.2M vocab, uncased (https://nlp.stanford.edu/projects/glove/).", "preprocessing": "Converted to w2v format with `python -m gensim.scripts.glove2word2vec -i <fname> -o glove-twitter-200.txt`.", "read_more": [ "https://nlp.stanford.edu/projects/glove/", "https://nlp.stanford.edu/pubs/glove.pdf" ], "checksum": "e52e8392d1860b95d5308a525817d8f9", "file_name": "glove-twitter-200.gz", "parts": 1 }, "__testing_word2vec-matrix-synopsis": { "description": "[THIS IS ONLY FOR TESTING] Word vecrors of the movie matrix.", "parameters": { "dimensions": 50 }, "preprocessing": "Converted to w2v using a preprocessed corpus. 
Converted to w2v format with `python3.5 -m gensim.models.word2vec -train <input_filename> -iter 50 -output <output_filename>`.", "read_more": [], "checksum": "534dcb8b56a360977a269b7bfc62d124", "file_name": "__testing_word2vec-matrix-synopsis.gz", "parts": 1 } } } ###Markdown List of Models ###Code for model_name, model_data in sorted(info['models'].items()): print( '%s (%d records): %s' % ( model_name, model_data.get('num_records', -1), model_data['description'][:40] + '...', ) ) ###Output __testing_word2vec-matrix-synopsis (-1 records): [THIS IS ONLY FOR TESTING] Word vecrors ... conceptnet-numberbatch-17-06-300 (1917247 records): ConceptNet Numberbatch consists of state... fasttext-wiki-news-subwords-300 (999999 records): 1 million word vectors trained on Wikipe... glove-twitter-100 (1193514 records): Pre-trained vectors based on 2B tweets,... glove-twitter-200 (1193514 records): Pre-trained vectors based on 2B tweets, ... glove-twitter-25 (1193514 records): Pre-trained vectors based on 2B tweets, ... glove-twitter-50 (1193514 records): Pre-trained vectors based on 2B tweets, ... glove-wiki-gigaword-100 (400000 records): Pre-trained vectors based on Wikipedia 2... glove-wiki-gigaword-200 (400000 records): Pre-trained vectors based on Wikipedia 2... glove-wiki-gigaword-300 (400000 records): Pre-trained vectors based on Wikipedia 2... glove-wiki-gigaword-50 (400000 records): Pre-trained vectors based on Wikipedia 2... word2vec-google-news-300 (3000000 records): Pre-trained vectors trained on a part of... word2vec-ruscorpora-300 (184973 records): Word2vec Continuous Skipgram vectors tra... 
###Markdown Word2Vec Model ###Code model = api.load("word2vec-google-news-300") model.most_similar("glass") model.most_similar("glass") ###Output _____no_output_____ ###Markdown Glove Model I will be using the GloVe Twitter pretrained model with 200 dimensions or possibly the 25 dimension model ###Code model_25 = api.load("glove-twitter-25") model_200 = api.load("glove-twitter-200") model.most_similar("glass") model.most_similar("imo") model_25.get_vector("football") texts = [ ["this", "is", "just", "a", "test", "too"], ["maybe", "the", "nfl", "will", "get", "shutdown", "too"], ["I'll", "bet", "on", "the", "49ers", "tonight"] ] labels = [ ["Rufus"], ["Jeff Ma"], ["A.I. Sports"] ] from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences import numpy as np maxlen = 10 # We will cut reviews after 100 words training_samples = 3 # We will be training on 200 samples validation_samples = 10000 # We will be validating on 10,000 samples max_words = 10000 # We will only consider the top 10,000 words in the dataset tokenizer = Tokenizer(num_words=max_words) tokenizer.fit_on_texts(texts) sequences = tokenizer.texts_to_sequences(texts) word_index = tokenizer.word_index print('Found %s unique tokens.' % len(word_index)) data = pad_sequences(sequences, maxlen=maxlen) labels = np.asarray(labels) print('Shape of data tensor:', data.shape) print('Shape of label tensor:', labels.shape) # Split the data into a training set and a validation set # But first, shuffle the data, since we started from data # where sample are ordered (all negative first, then all positive). 
#indices = np.arange(data.shape[0]) #np.random.shuffle(indices) #data = data[indices] #labels = labels[indices] #x_train = data[:training_samples] #y_train = labels[:training_samples] #x_val = data[training_samples: training_samples + validation_samples] #y_val = labels[training_samples: training_samples + validation_samples] embedding_dim = 300 embedding_matrix = np.zeros((max_words, embedding_dim)) data labels len(model_25.vocab.keys()) embedding_dim = 25 max_words = 19 embedding_matrix = np.zeros((max_words, embedding_dim)) word_index nb_words = min(max_words, len(word_index)) # randomized weights for missing words... !?? embedding_matrix = (np.random.rand(nb_words, embedding_dim) - 0.5) / 5.0 len(embedding_matrix) for word, i in word_index.items(): if i >= max_features: continue if word in model_25: embedding_vector = model_25.get_vector(word) embedding_matrix[i] = embedding_vector embedding_matrix ###Output _____no_output_____ ###Markdown DNN ###Code from keras.models import Sequential from keras.layers import Embedding, Flatten, Dense model_dnn = Sequential(name = "Dense Neural Network") model_dnn.add(Embedding(max_words, embedding_dim, input_length=maxlen, name = "GloVe_Twitter-25")) model_dnn.add(Flatten()) model_dnn.add(Dense(32, activation='relu')) model_dnn.add(Dense(42, activation='softmax')) ## Freeze the embedding weights equal to our pretrained model model_dnn.layers[0].set_weights([embedding_matrix]) model_dnn.layers[0].trainable = False model_dnn.summary() model_dnn.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc']) history = model_dnn.fit(x_train, y_train, epochs=10, batch_size=32, validation_data=(x_val, y_val)) model_dnn.save_weights('pre_trained_glove_model.h5') ###Output _____no_output_____ ###Markdown 1D CNN ###Code from keras.datasets import imdb from keras.preprocessing import sequence max_features = 10000 max_len = 500 print('Loading data...') (x_train, y_train), (x_test, y_test) = 
imdb.load_data(num_words=max_features) print(len(x_train), 'train sequences') print(len(x_test), 'test sequences') print('Pad sequences (samples x time)') x_train = sequence.pad_sequences(x_train, maxlen=max_len) x_test = sequence.pad_sequences(x_test, maxlen=max_len) print('x_train shape:', x_train.shape) print('x_test shape:', x_test.shape) from keras.models import Sequential from keras import layers from keras.optimizers import RMSprop model_cnn = Sequential(name="1D_Convolutional_Neural_Network") model_cnn.add(Embedding(max_words, embedding_dim, input_length=maxlen)) model_cnn.add(layers.Conv1D(32, 7, activation='relu')) model_cnn.add(layers.MaxPooling1D(2)) model_cnn.add(layers.Conv1D(32, 7, activation='relu')) model_cnn.add(layers.GlobalMaxPooling1D()) model_cnn.add(layers.Dense(1)) ## Freeze the embedding weights equal to our pretrained model model_cnn.layers[0].set_weights([embedding_matrix]) model_cnn.layers[0].trainable = False # print model summary model_cnn.summary() model_cnn.compile(optimizer=RMSprop(lr=1e-4), loss='binary_crossentropy', metrics=['acc']) history = model.fit(x_train, y_train, epochs=10, batch_size=128, validation_split=0.2) ###Output _____no_output_____ ###Markdown RNN ###Code # define model model_rnn = Sequential(name="Recurrnet_Neural_Network") model_rnn.add(Embedding(max_words, embedding_dim, input_length=maxlen)) model_rnn.add(LSTM(100, return_sequences=True)) model_rnn.add(LSTM(100)) model_rnn.add(Dense(100, activation='relu')) model_rnn.add(Dense(42, activation='softmax')) ## Freeze the embedding weights equal to our pretrained model model_rnn.layers[0].set_weights([embedding_matrix]) model_rnn.layers[0].trainable = False # print model summary model_rnn.summary() ###Output _____no_output_____
Pyramid Scene Parsing Network (PSPNet) Review/code/์ด์˜์„๋‹˜ PSPNet_Code Review.ipynb
###Markdown PSPNet![fig_3](./fig_3.png)์ฝ”๋“œ์˜ ๊ตฌํ˜„์—๋Š” https://github.com/hszhao/semseg ๋ฅผ ์ฐธ๊ณ ํ•˜์˜€์Šต๋‹ˆ๋‹ค. ์ˆœ์„œ1. Dilated ResNet ์ฝ”๋“œ2. Pyramid Parsing Module ์ฝ”๋“œ3. PSPNet ์ „์ฒด ์ฝ”๋“œ 1. Dilated Residual Network (Dilated ResNet) ###Code import torch import torch.nn as nn from torchinfo import summary import torch.nn.functional as F import resnet as models device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') class DilatedResNet(nn.Module): def __init__(self, layers=50, pretrained=True): super(DilatedResNet, self).__init__() # ResNet 50 if layers == 50: resnet = models.resnet50(pretrained=pretrained) # ResNet 101 elif layers == 101: resnet = models.resnet101(pretrained=pretrained) # ResNet 152 else: resnet = models.resnet152(pretrained=pretrained) # ResNet with dilated network self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.conv2, resnet.bn2, resnet.relu, resnet.conv3, resnet.bn3, resnet.relu, resnet.maxpool) self.layer1 = resnet.layer1 self.layer2 = resnet.layer2 self.layer3 = resnet.layer3 self.layer4 = resnet.layer4 for n, m in self.layer3.named_modules(): if 'conv2' in n: m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1) elif 'downsample.0' in n: m.stride = (1, 1) for n, m in self.layer4.named_modules(): if 'conv2' in n: m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1) elif 'downsample.0' in n: m.stride = (1, 1) def forward(self, x, y=None): x = self.layer0(x) x = self.layer1(x) x = self.layer2(x) x_tmp = self.layer3(x) x = self.layer4(x_tmp) return x inp = torch.rand(4, 3, 200, 200) layers = 50 resnet = DilatedResNet(layers=layers, pretrained=False) output = resnet(inp) print(f"Dilated ResNet {layers}'s output size : {output.size()}") ###Output Dilated ResNet 50's output size : torch.Size([4, 2048, 25, 25]) ###Markdown 2. 
Pyramid Pooling Module ###Code
class PPM(nn.Module):
    """Pyramid Pooling Module.

    Average-pools the input feature map to several grid sizes (`bins`),
    projects each pooled map down to `reduction_dim` channels with a 1x1
    conv, upsamples each back to the input resolution, and concatenates
    everything (original input included) along the channel axis.
    Output channels = in_dim + len(bins) * reduction_dim.
    """

    def __init__(self, in_dim, reduction_dim, bins):
        super(PPM, self).__init__()
        self.features = []
        # bins = (1, 2, 3, 6) : 1x1, 2x2, 3x3, 6x6
        for bin in bins:
            self.features.append(nn.Sequential(
                # create one pooling branch per pyramid scale
                nn.AdaptiveAvgPool2d(bin),
                # reduce channels to 1/N of the input (N = number of pyramid levels, here 4)
                nn.Conv2d(in_dim, reduction_dim, kernel_size=1, bias=False),
                nn.BatchNorm2d(reduction_dim),
                nn.ReLU(inplace=True)
            ))
        self.features = nn.ModuleList(self.features)

    def forward(self, x):
        x_size = x.size()
        out = [x]
        for f in self.features:
            # upsample each pooled map back to the input's spatial size
            out.append(F.interpolate(f(x), x_size[2:], mode='bilinear', align_corners=True))
        # concatenate the pooling results from every pyramid scale
        return torch.cat(out, 1)

# input features dim : 2048
in_dim = output.size()[1]
# pyramid pooling levels : 1x1, 2x2, 3x3, 6x6
bins = (1, 2, 3, 6)
# dimension reduction : 1 / N
reduction_dim = int(in_dim / len(bins))
# N = 4

ppm = PPM(in_dim=in_dim, reduction_dim=reduction_dim, bins=bins)
output = ppm(output)
print(f"Pyramid Pooling Module's output size : {output.size()}")
###Output Pyramid Pooling Module's output size : torch.Size([4, 4096, 25, 25]) ###Markdown AdaptiveAvgPool2d ###Code
# AdaptiveAvgPool2d demo: average-pool a 3x3 map to a requested output size.
inp = torch.tensor([[[[1., 2., 3.],
                      [4., 5., 6.],
                      [7., 8., 9]]]], dtype = torch.float)
print(inp.shape)
print(inp)

# 2x2 output: each cell averages the overlapping 2x2 window of the input.
out = nn.AdaptiveAvgPool2d(2)(inp)
print(out)
# print(torch.tensor(
#     [[[(1. + 2. + 4. + 5.) / 4, (2. + 3. + 5. + 6.) / 4],
#       [(4. + 5. + 7. + 8.) / 4, (5. + 6. + 8. + 9.) / 4]]]))

# Global Average Pooling
out = nn.AdaptiveAvgPool2d(1)(inp)
print(out)
###Output tensor([[[[5.]]]]) ###Markdown 3.
PSPNet ์ „์ฒด ์ฝ”๋“œ ###Code class PSPNet(nn.Module): def __init__(self, layers=50, bins=(1, 2, 3, 6), dropout=0.1, classes=2, zoom_factor=8, pretrained=True): super(PSPNet, self).__init__() # output์˜ ํฌ๊ธฐ๋ฅผ ์›๋ณธ ์ด๋ฏธ์ง€์™€ ๋™์ผํ•˜๊ฒŒ ๋ณต์›ํ•˜๊ธฐ ์œ„ํ•œ ๊ฐ’ # Feature map์˜ ํฌ๊ธฐ๋Š” ์›๋ณธ ์ด๋ฏธ์ง€์˜ 1/8 self.zoom_factor = zoom_factor self.criterion = nn.CrossEntropyLoss() # ResNet if layers == 50: resnet = models.resnet50(pretrained=pretrained) elif layers == 101: resnet = models.resnet101(pretrained=pretrained) else: resnet = models.resnet152(pretrained=pretrained) # ResNet with dilated network self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.conv2, resnet.bn2, resnet.relu, resnet.conv3, resnet.bn3, resnet.relu, resnet.maxpool) self.layer1 = resnet.layer1 self.layer2 = resnet.layer2 self.layer3 = resnet.layer3 self.layer4 = resnet.layer4 for n, m in self.layer3.named_modules(): if 'conv2' in n: m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1) elif 'downsample.0' in n: m.stride = (1, 1) for n, m in self.layer4.named_modules(): if 'conv2' in n: m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1) elif 'downsample.0' in n: m.stride = (1, 1) # Dilated ResNet output size : torch.Size([4, 2048, 60, 60]) fea_dim = 2048 self.ppm = PPM(in_dim = fea_dim, reduction_dim = int(fea_dim / len(bins)), bins=bins) # Pyramid Pooling Module output size : torch.Size([4, 4096, 60, 60]) fea_dim *= 2 # 4096 self.cls = nn.Sequential( nn.Conv2d(fea_dim, 512, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(512), nn.ReLU(inplace=True), nn.Dropout2d(p=dropout), nn.Conv2d(512, classes, kernel_size=1) ) if self.training: self.aux = nn.Sequential( nn.Conv2d(1024, 256, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(256), nn.ReLU(inplace=True), nn.Dropout2d(p=dropout), nn.Conv2d(256, classes, kernel_size=1) ) def forward(self, x, y=None): x_size = x.size() # Input image's height, width h = int((x_size[2] - 1) / 8 * self.zoom_factor 
+ 1) w = int((x_size[3] - 1) / 8 * self.zoom_factor + 1) # Resnet with dilated network x = self.layer0(x) x = self.layer1(x) x = self.layer2(x) x_tmp = self.layer3(x) x = self.layer4(x_tmp) # Pyramid Pooling Module x = self.ppm(x) # Master branch x = self.cls(x) # ์›๋ณธ ์ด๋ฏธ์ง€ ํฌ๊ธฐ๋กœ upsampling if self.zoom_factor != 1: x = F.interpolate(x, size=(h, w), mode='bilinear', align_corners=True) if self.training: # Auxiliary Loss๋Š” training์—์„œ๋งŒ ์‚ฌ์šฉ aux = self.aux(x_tmp) # ์›๋ณธ ์ด๋ฏธ์ง€ ํฌ๊ธฐ๋กœ upsampling if self.zoom_factor != 1: aux = F.interpolate(aux, size=(h, w), mode='bilinear', align_corners=True) main_loss = self.criterion(x, y) aux_loss = self.criterion(aux, y) return x.max(1)[1], main_loss, aux_loss else: return x inp = torch.rand(4, 3, 473, 473).to(device) layers = 50 pspnet = PSPNet(layers=layers, bins=(1, 2, 3, 6), dropout=0.1, classes=2, zoom_factor=8, pretrained=False).to(device) pspnet.eval() output = pspnet(inp) print(f"PSPNet with Dilated ResNet {layers}'s output size : {output.size()}") ###Output PSPNet with Dilated ResNet 50's output size : torch.Size([4, 2, 473, 473])
Modulo2/1. Estructuras de Control Iterativas.ipynb
###Markdown Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โ€œInformes del Aรฑo aรฑoโ€ ###Code # -*- coding: utf-8 -* anio = 2001 while anio <= 2012: print("Informes del Aรฑo {}".format(anio)) anio += 1 # anio = anio + 2 # anio += 1 ###Output Informes del Aรฑo 2001 Informes del Aรฑo 2002 Informes del Aรฑo 2003 Informes del Aรฑo 2004 Informes del Aรฑo 2005 Informes del Aรฑo 2006 Informes del Aรฑo 2007 Informes del Aรฑo 2008 Informes del Aรฑo 2009 Informes del Aรฑo 2010 Informes del Aรฑo 2011 Informes del Aรฑo 2012 ###Markdown Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.: ###Code c = 0 while c <= 5: c+=1 if c == 4: print("Rompemos el bucle cuando c vale", c) break print("c vale",c) ###Output c vale 1 c vale 2 c vale 3 Rompemos el bucle cuando c vale 4 ###Markdown - Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle. 
###Code c = 0 while c <= 5: c+=1 if c==3 or c==4: print("Continuamos con la siguiente iteraciรณn", c) continue print("c vale",c) ###Output c vale 1 c vale 2 Continuamos con la siguiente iteraciรณn 3 Continuamos con la siguiente iteraciรณn 4 c vale 5 c vale 6 ###Markdown Ejemplo Menรบ Interactivo ###Code print("Bienvenido al menรบ interactivo") while True: print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir""") opcion = input() if opcion == '1': print("Hola, espero que te lo estรฉs pasando bien") elif opcion == '2': n1 = float(input("Introduce el primer nรบmero: ")) n2 = float(input("Introduce el segundo nรบmero: ")) print("El resultado de la suma es: ",n1+n2) elif opcion =='3': print("ยกHasta luego! Ha sido un placer ayudarte") break else: print("Comando desconocido, vuelve a intentarlo") ###Output Bienvenido al menรบ interactivo ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir ###Markdown EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta ###Code print("Bienvenido") a = int(input("Ingrese primer nรบmero: ")) b = int(input("Ingrese segundo nรบmero: ")) while True: print("""Elija un opciรณn 1. Sumar dos numeros 2. Restar dos numeros 3. Multiplicar dos numeros 4. 
Salir """) opc = input() if opc == '1': respuesta = a + b print("La suma es: ",respuesta) elif opc == '2': respuesta = a - b print("La Resta es: ",respuesta) elif opc == '3': respuesta = a * b print("La Multiplicacion es: ",respuesta) elif opc == '4': print("Hasta luego") break else: print("Ingrese un numero de la lista") while True: a = input("Ingrese primer nรบmero: ") try: a = float(a) break except: print('dato ingresado no es un numero') a ###Output _____no_output_____ ###Markdown Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla: ###Code # Iterando sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for elemento in mi_lista: print(elemento) # Modificando valores sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for indice,nombre in enumerate(mi_lista): if nombre=='Juan': mi_lista[indice]='Maria' # valor cambiado mi_lista # Iterando sobre diccionarios dicx = {'val1':1,'val2':2,'val3':3} for key,val in dicx.items(): print(key,val) cadena = 'hola mundo' for l in cadena: print(l) ###Output h o l a m u n d o ###Markdown Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha: ###Code # -*- coding: utf-8 -*- # Generarndo un rango de valores del 2001 al 2012 for anio in range(2001, 2013): print("Informes del Aรฑo", str(anio)) # por defecto el inicio de la funcion es 0 for i in range(10): print(i) ###Output 0 1 2 3 4 5 6 7 8 9 ###Markdown EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. 
NotaUsar listas ###Code cantidad = int(input('Ingrese la cantidad de numeros a ingresar: ')) lista_num = [] for i in range(cantidad): num = float(input(f'Ingrese el numero {i+1}: ')) lista_num.append(num) #lista_num len(lista_num) # cantidad de elementos suma = 0 for e in lista_num: suma = suma + e suma media_aritmetica = suma / cantidad media_aritmetica # forma 2 sum(lista_num)/cantidad # forma 3 suma = 0 for i in range(cantidad): num = float(input(f'Ingrese el numero {i+1}: ')) suma = suma + num print('el promedio es: ',suma/cantidad) # forma 4 i= 1 suma = 0 while i <= cantidad: num = float(input(f'Ingrese el numero {i}: ')) suma = suma + num i +=1 print('el promedio es: ',suma/cantidad) ###Output Ingrese el numero 1: 8 Ingrese el numero 2: 10 Ingrese el numero 3: 4 Ingrese el numero 4: 1 ###Markdown 2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4 ###Code '#'*5 ###Output Altura: 4 ###Markdown 3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. ###Code n = int(input("Introduce un nรบmero entero positivo mayor que 2: ")) i = 2 while n % i != 0: i += 1 if i == n: print(str(n) + " es primo") else: print(str(n) + " no es primo") ###Output Introduce un nรบmero entero positivo mayor que 2: 4 ###Markdown 4. 
Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista: ###Code lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o'] lista_2 = ["h",'o','l','a',' ', 'l','u','n','a'] lista_3 = [] for letra in lista_1: if letra in lista_2 and letra not in lista_3: lista_3.append(letra) print(lista_3) ###Output ['h', 'o', 'l', 'a', ' ', 'u', 'n'] ###Markdown Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โ€œInformes del Aรฑo aรฑoโ€ ###Code # -*- coding: utf-8 -* anio = 2001 while anio <= 2012: print("Informes del Aรฑo {}".format(anio)) anio += 1 # anio = anio + 2 # anio += 1 ###Output Informes del Aรฑo 2001 Informes del Aรฑo 2002 Informes del Aรฑo 2003 Informes del Aรฑo 2004 Informes del Aรฑo 2005 Informes del Aรฑo 2006 Informes del Aรฑo 2007 Informes del Aรฑo 2008 Informes del Aรฑo 2009 Informes del Aรฑo 2010 Informes del Aรฑo 2011 Informes del Aรฑo 2012 ###Markdown Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. 
- Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.: ###Code c = 0 while c <= 5: c+=1 if c == 4: print("Rompemos el bucle cuando c vale", c) break print("c vale",c) ###Output c vale 1 c vale 2 c vale 3 Rompemos el bucle cuando c vale 4 ###Markdown - Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle. ###Code c = 0 while c <= 5: c+=1 if c==3 or c==4: print("Continuamos con la siguiente iteraciรณn", c) continue print("c vale",c) ###Output c vale 1 c vale 2 Continuamos con la siguiente iteraciรณn 3 Continuamos con la siguiente iteraciรณn 4 c vale 5 c vale 6 ###Markdown Ejemplo Menรบ Interactivo ###Code print("Bienvenido al menรบ interactivo") while True: print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir""") opcion = input() if opcion == '1': print("Hola, espero que te lo estรฉs pasando bien") elif opcion == '2': n1 = float(input("Introduce el primer nรบmero: ")) n2 = float(input("Introduce el segundo nรบmero: ")) print("El resultado de la suma es: ",n1+n2) elif opcion =='3': print("ยกHasta luego! Ha sido un placer ayudarte") break else: print("Comando desconocido, vuelve a intentarlo") ###Output Bienvenido al menรบ interactivo ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir ###Markdown EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta ###Code print("Bienvenido") a = int(input("Ingrese primer nรบmero: ")) b = int(input("Ingrese segundo nรบmero: ")) while True: print("""Elija un opciรณn 1. Sumar dos numeros 2. Restar dos numeros 3. 
Multiplicar dos numeros 4. Salir """) opc = input() if opc == '1': respuesta = a + b print("La suma es: ",respuesta) elif opc == '2': respuesta = a - b print("La Resta es: ",respuesta) elif opc == '3': respuesta = a * b print("La Multiplicacion es: ",respuesta) elif opc == '4': print("Hasta luego") break else: print("Ingrese un numero de la lista") while True: a = input("Ingrese primer nรบmero: ") try: a = float(a) break except: print('dato ingresado no es un numero') a ###Output _____no_output_____ ###Markdown Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla: ###Code # Iterando sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for elemento in mi_lista: print(elemento) # Modificando valores sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for indice,nombre in enumerate(mi_lista): if nombre=='Juan': mi_lista[indice]='Maria' # valor cambiado mi_lista # Iterando sobre diccionarios dicx = {'val1':1,'val2':2,'val3':3} for key,val in dicx.items(): print(key,val) cadena = 'hola mundo' for l in cadena: print(l) ###Output h o l a m u n d o ###Markdown Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha: ###Code # -*- coding: utf-8 -*- # Generarndo un rango de valores del 2001 al 2012 for anio in range(2001, 2013): print("Informes del Aรฑo", str(anio)) # por defecto el inicio de la funcion es 0 for i in range(10): print(i) ###Output 0 1 2 3 4 5 6 7 8 9 ###Markdown EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. 
NotaUsar listas ###Code cantidad = int(input('Ingrese la cantidad de numeros a ingresar: ')) lista_num = [] for i in range(cantidad): num = float(input(f'Ingrese el numero {i+1}: ')) lista_num.append(num) #lista_num len(lista_num) # cantidad de elementos suma = 0 for e in lista_num: suma = suma + e suma media_aritmetica = suma / cantidad media_aritmetica # forma 2 sum(lista_num)/cantidad # forma 3 suma = 0 for i in range(cantidad): num = float(input(f'Ingrese el numero {i+1}: ')) suma = suma + num pass # termina bucle print('el promedio es: ',suma/cantidad) # forma 4 i= 1 suma = 0 while i <= cantidad: num = float(input(f'Ingrese el numero {i}: ')) suma = suma + num i +=1 print('el promedio es: ',suma/cantidad) ###Output Ingrese el numero 1: 8 Ingrese el numero 2: 10 Ingrese el numero 3: 4 Ingrese el numero 4: 1 ###Markdown 2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4 ###Code n=4 '#'*5 #Formando un triรกngulo n=int(input("Ingrese un numero entero: ")) c=0 while c <= n: print("#"*c) c +=1 ###Output Ingrese un numero entero: 7 ###Markdown 3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. ###Code numero=int(input("Ingreso un numero entero: ")) c=0 for i in range(1,numero+1): if (numero%i)==0: c += 1 if c == 2: print("El numero es PRIMO") else: print("EL numero NO ES PRIMO") ###Output _____no_output_____ ###Markdown 4. 
Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista: ###Code lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o'] lista_2 = ["h",'o','l','a',' ', 'l','u','n','a'] con1=set(lista_1) con2=set(lista_2) lista_3=list(con1.union(con2)) lista_3 ###Output _____no_output_____ ###Markdown Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. 
EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โ€œInformes del Aรฑo aรฑoโ€ ###Code # -*- coding: utf-8 -* # para el aรฑo 2008,2009,2010 no debo entregar informe # si el aรฑo es 2007 parar el bucle anio = 2001 while anio <= 2012: #if anio == 2007: # print('salida del bucle') # break # salir del bucle if anio in [2008,2009,2010]: print(f'no presentar informe anio {anio}') anio += 1 continue # continua a la sigueinte iteracion print("Informes del Aรฑo {}".format(anio)) anio += 1 # anio = anio + 1 # anio += 1 ###Output Informes del Aรฑo 2001 Informes del Aรฑo 2002 Informes del Aรฑo 2003 Informes del Aรฑo 2004 Informes del Aรฑo 2005 Informes del Aรฑo 2006 Informes del Aรฑo 2007 no presentar informe anio 2008 no presentar informe anio 2009 no presentar informe anio 2010 Informes del Aรฑo 2011 Informes del Aรฑo 2012 ###Markdown Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. ###Code c = 0 while c <= 5: print("c vale ",c) if c == 4: print("Rompemos el bucle cuando c vale ", c) break c+=1 print("bucle finalizado") ###Output c vale 0 c vale 1 c vale 2 c vale 3 c vale 4 Rompemos el bucle cuando c vale 4 bucle finalizado ###Markdown - Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle. 
###Code c = 0 while c <= 5: c+=1 if c==3 or c==4: print("Continuamos con la siguiente iteraciรณn", c) continue print("c vale",c) ###Output c vale 1 c vale 2 Continuamos con la siguiente iteraciรณn 3 Continuamos con la siguiente iteraciรณn 4 c vale 5 c vale 6 ###Markdown Ejemplo Menรบ Interactivo ###Code print("Bienvenido al menรบ interactivo") flag = True while flag: print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir""") opcion = input() # me devuelve un string '' if opcion == '1': print("Hola, espero que te lo estรฉs pasando bien") break #flag = False elif opcion == '2': n1 = float(input("Introduce el primer nรบmero: ")) n2 = float(input("Introduce el segundo nรบmero: ")) print(f"El resultado de la suma es: {n1+n2}") break #flag = False elif opcion =='3': print("ยกHasta luego! Ha sido un placer ayudarte") #flag = False break else: print("Comando desconocido, vuelve a intentarlo") print("otra cosa xd ") pass # end while print("finalizo el programa") ###Output Bienvenido al menรบ interactivo ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir ###Markdown EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta ###Code print("Bienvenido al menรบ interactivo") while True: print("""ยฟQuรฉ quieres hacer? 
Escribe una opciรณn 1) Suma de los dos numeros 2) Restar el 1er nro menos el 2do nro 3) Multiplicar los dos nros""") opcion = input("Ingrese la opcion ") nro1 = int(input("Ingrese el 1er nro ")) nro2 = int(input("Ingrese el 2do nro ")) if opcion == '1': print(f"L suma de los nros es: {nro1 + nro2}") break elif opcion == '2': print(f"La resta de los nros es: {nro1 - nro2}") break elif opcion =='3': print(f"La multiplicacion de los nros es: {nro1*nro2}") break else: print("Comando desconocido, vuelve a intentarlo") ###Output Bienvenido al menรบ interactivo ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Suma de los dos numeros 2) Restar el 1er nro menos el 2do nro 3) Multiplicar los dos nros ###Markdown Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla: ###Code # Iterando sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for nombre in mi_lista: print(nombre) num1, num2 = [12,15] print(num1) print(num2) for i,nombre in enumerate(mi_lista): print(i, nombre) # Modificando valores sobre listas mi_lista = ['Juan', 'Antonio','Juan', 'Pedro', 'Herminio','Juan'] for indice,nombre in enumerate(mi_lista): # print(indice, nombre) if nombre == 'Juan': mi_lista[indice] = 'Maria' #mi_lista.remove("Juan") print(mi_lista) mi_lista = ['Juan', 'Antonio','Juan', 'Pedro', 'Herminio','Juan'] mi_lista.count("Juan") # cantidad de veces de 'Juan' en la lista for i in range(mi_lista.count("Juan")): mi_lista.remove("Juan") print(mi_lista) # valor cambiado mi_lista # Iterando sobre diccionarios dicx = {'key1':1,'key2':2,'key3':3} for key,value in dicx.items(): if key == 'key1': dicx[key] = 10 dicx dicx.items() dicx['key1'] = 10 dicx['nombres'] = ['Maria', 'Antonio', 'Maria', 'Pedro', 'Herminio', 'Maria'] dicx # Iterando sobre strings texto = 'Hola Mundo' for i, letra in enumerate(texto): print(i, letra) texto= 'Hola Mundo' texto texto[1] = "@" ## reeemplazar 'o' por 
'x' en la cadena texto texto= 'Hola Mundo' new_texto = "" for i, l in enumerate(texto): if l == 'o': # agrego 'x' new_texto += 'x' else: # agrego la letra new_texto += l new_texto texto_s = '' for l in texto: if l == 'o': texto_s += 'x' continue texto_s += l print(l) texto_s texto_s = '' for l in texto: texto_s = texto_s + l print(texto_s) texto_s ###Output H Ho Hol Hola Hola Hola M Hola Mu Hola Mun Hola Mund Hola Mundo ###Markdown Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha: ###Code # range -> no es un lista # si quiero convetir el range a una lista debo hacer [*range(3000,4000,100)] # -*- coding: utf-8 -*- # Generarndo un rango de valores del 2001 al 2012 for anio in range(2001, 2013): print(f"Informes del Aรฑo {anio}") # por defecto el inicio de la funcion es 0 for i in range(10): print(f'#{i}') ###Output #0 #1 #2 #3 #4 #5 #6 #7 #8 #9 ###Markdown EJERCICIOS----------------------------- 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas ###Code #1.cuantos nรบmeros quiere introducir cantidad = int(input('Cantidad de numeros a introducir: ')) cantidad #2. lee todos los nรบmeros #a = int(input("numero 1")) #b = int(input("numero 2")) #c = int(input("numero 3")) lista_numeros = [] for i in range(cantidad): num = int(input(f"Ingrese el numero {i +1}: ")) lista_numeros.append(num) lista_numeros #3.media aritmรฉtica #p = (a + b + c) / cantidad #3 promedio = 0 for num in lista_numeros: promedio += num promedio = promedio / cantidad print(f"La media aritmรฉtica de los numeros es {promedio}") #sum(lista_numeros)/cantidad #### funcions def calculo_media(lista_numeros): promedio = 0 for num in lista_numeros: promedio += num return promedio / cantidad calculo_media([1,2,5]) ###Output _____no_output_____ ###Markdown 2. 
Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4 ###Code #1. Escribir un programa que pida al usuario un nรบmero entero h = int(input('Cantidad de numeros a introducir: ')) h #2.muestre por pantalla un triรกngulo rectรกngulo altura 'h' for i in range(1, h +1 ): print('#' *i) '#'*4 for i in range(1, h + 1): print(i) ###Output 1 2 3 4 ###Markdown ###Code h = 4 for i in range(1, h+1): print(' '* (h-i) + '#' * i) ' '*3 + '#' * 1 ' ##' ' '*2 + '#' * 2 ' ###' ' '*1 + '#' * 3 ###Output _____no_output_____ ###Markdown 3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. 4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista: ###Code lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o'] lista_2 = ["h",'o','l','a',' ', 'l','u','n','a'] ###Output _____no_output_____ ###Markdown Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. 
EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โ€œInformes del Aรฑo aรฑoโ€ ###Code # -*- coding: utf-8 -* anio = 2001 while anio <= 2012: print("Informes del Aรฑo {}".format(anio)) anio += 1 # anio = anio + 2 # anio += 1 ###Output Informes del Aรฑo 2001 Informes del Aรฑo 2002 Informes del Aรฑo 2003 Informes del Aรฑo 2004 Informes del Aรฑo 2005 Informes del Aรฑo 2006 Informes del Aรฑo 2007 Informes del Aรฑo 2008 Informes del Aรฑo 2009 Informes del Aรฑo 2010 Informes del Aรฑo 2011 Informes del Aรฑo 2012 ###Markdown Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.: ###Code c = 0 while c <= 5: c+=1 if c == 4: print("Rompemos el bucle cuando c vale", c) break print("c vale",c) #c+=1 d="hola" print(d,"mundo") ###Output hola mundo ###Markdown - Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle. ###Code c = 0 while c <= 5: c+=1 if c==3 or c==4: print("Continuamos con la siguiente iteraciรณn", c) continue print("c vale",c) ###Output c vale 1 c vale 2 Continuamos con la siguiente iteraciรณn 3 Continuamos con la siguiente iteraciรณn 4 c vale 5 c vale 6 ###Markdown Ejemplo Menรบ Interactivo ###Code print("Bienvenido al menรบ interactivo") while True: print("""ยฟQuรฉ quieres hacer? 
Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir""") opcion = input() if opcion == '1': print("Hola, espero que te lo estรฉs pasando bien") elif opcion == '2': n1 = float(input("Introduce el primer nรบmero: ")) n2 = float(input("Introduce el segundo nรบmero: ")) print("El resultado de la suma es: ",n1+n2) elif opcion =='3': print("ยกHasta luego! Ha sido un placer ayudarte") break else: print("Comando desconocido, vuelve a intentarlo") ###Output Bienvenido al menรบ interactivo ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir ###Markdown EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta ###Code print("Bienvenido") a = int(input("Ingrese primer nรบmero: ")) b = int(input("Ingrese segundo nรบmero: ")) while True: print("""Elija un opciรณn 1. Sumar dos numeros 2. Restar dos numeros 3. Multiplicar dos numeros 4. 
Salir """) opc = input() if opc == '1': respuesta = a + b print("La suma es: ",respuesta) elif opc == '2': respuesta = a - b print("La Resta es: ",respuesta) elif opc == '3': respuesta = a * b print("La Multiplicacion es: ",respuesta) elif opc == '4': print("Hasta luego") break else: print("Ingrese un numero de la lista") while True: a = input("Ingrese primer nรบmero: ") try: a = float(a) break except: print('dato ingresado no es un numero') a ###Output _____no_output_____ ###Markdown Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla: ###Code # Iterando sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for elemento in mi_lista: print(elemento) # Modificando valores sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Luis'] for indice,nombre in enumerate(mi_lista): if nombre=='Juan': mi_lista[indice]='Maria' # valor cambiado mi_lista # Iterando sobre diccionarios dicx = {'val1':1,'val2':2,'val3':3} for key,val in dicx.items(): print(key,val) cadena = 'hola mundo' for l in cadena: print(l) ###Output h o l a m u n d o ###Markdown Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha: ###Code # -*- coding: utf-8 -*- # Generarndo un rango de valores del 2001 al 2012 for anio in range(2001, 2013): print("Informes del Aรฑo", str(anio)) # por defecto el inicio de la funcion es 0 for i in range(10): print(i) ###Output 0 1 2 3 4 5 6 7 8 9 ###Markdown EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. 
NotaUsar listas ###Code cantidad = int(input('Ingrese la cantidad de numeros a ingresar: ')) lista_num = [] for i in range(cantidad): num = float(input(f'Ingrese el numero {i+1}: ')) lista_num.append(num) lista_num #lista_num len(lista_num) # cantidad de elementos suma = 0 for e in lista_num: suma = suma + e suma media_aritmetica = suma / cantidad media_aritmetica # forma 2 sum(lista_num)/cantidad # forma 3 suma = 0 for i in range(cantidad): num = float(input(f'Ingrese el numero {i+1}: ')) suma = suma + num pass # termina bucle print('el promedio es: ',suma/cantidad) # forma 4 i= 1 suma = 0 while i <= cantidad: num = float(input(f'Ingrese el numero {i}: ')) suma = suma + num i +=1 print('el promedio es: ',suma/cantidad) ###Output Ingrese el numero 1: 8 Ingrese el numero 2: 10 Ingrese el numero 3: 4 Ingrese el numero 4: 1 ###Markdown 2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4 ###Code n=4 '#'*5 numero=int(input("Introduce un numero entero:")) tri=1 while tri<=numero: print("#"*tri) tri+=1 ###Output # ## ### #### ##### ###### ####### ###Markdown 3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. ###Code num=int(input("Escribe un numer entero: ")) lista=[] div=1 while div<=num: lisdiv=num/div lista.append(lisdiv) div+=1 lista ###Output _____no_output_____ ###Markdown 4. 
Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista: ###Code lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o'] lista_2 = ["h",'o','l','a',' ', 'l','u','n','a'] lista3=[] for let in lista_1: if let in lista_2 and let not in lista3: lista3.append(let) lista3 ###Output _____no_output_____ ###Markdown Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โ€œInformes del Aรฑo aรฑoโ€ ###Code # -*- coding: utf-8 -* anio = 2001 while anio <= 2012: print("Informes del Aรฑo {}".format(str(anio))) anio += 1 # anio = anio + 2 # anio += 1 ###Output Informes del Aรฑo 2001 Informes del Aรฑo 2002 Informes del Aรฑo 2003 Informes del Aรฑo 2004 Informes del Aรฑo 2005 Informes del Aรฑo 2006 Informes del Aรฑo 2007 Informes del Aรฑo 2008 Informes del Aรฑo 2009 Informes del Aรฑo 2010 Informes del Aรฑo 2011 Informes del Aรฑo 2012 ###Markdown Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. 
No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.: ###Code c = 0 while c <= 5: c+=1 if c == 4: print("Rompemos el bucle cuando c vale", c) break print("c vale",c) ###Output c vale 1 c vale 2 c vale 3 Rompemos el bucle cuando c vale 4 ###Markdown - Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle. ###Code c = 0 while c <= 5: c+=1 if c==3 or c==4: # print("Continuamos con la siguiente iteraciรณn", c) continue print("c vale",c) ###Output c vale 1 c vale 2 c vale 5 c vale 6 ###Markdown Ejemplo Menรบ Interactivo ###Code print("Bienvenido al menรบ interactivo") while(True): print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir""") opcion = input() if opcion == '1': print("Hola, espero que te lo estรฉs pasando bien") elif opcion == '2': n1 = float(input("Introduce el primer nรบmero: ")) n2 = float(input("Introduce el segundo nรบmero: ")) print("El resultado de la suma es: ",n1+n2) elif opcion =='3': print("ยกHasta luego! Ha sido un placer ayudarte") break else: print("Comando desconocido, vuelve a intentarlo") ###Output Bienvenido al menรบ interactivo ยฟQuรฉ quieres hacer? 
Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir ###Markdown EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla: ###Code # Iterando sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for elemento in mi_lista: print(elemento) # Modificando valores sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for indice,nombre in enumerate(mi_lista): if nombre=='Juan': mi_lista[indice]='Maria' # valor cambiado mi_lista # Iterando sobre diccionarios dicx = {'val1':1,'val2':2,'val3':3} for key,val in dicx.items(): print(key,val) ###Output val1 1 val2 2 val3 3 ###Markdown Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha: ###Code # -*- coding: utf-8 -*- # Generarndo un rango de valores del 2001 al 2012 for anio in range(2001, 2013): print("Informes del Aรฑo", str(anio)) # por defecto el inicio de la funcion es 0 for i in range(10): print(i) ###Output 0 1 2 3 4 5 6 7 8 9 ###Markdown EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas 2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4 3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. 4. 
Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista: ###Code lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o'] lista_2 = ["h",'o','l','a',' ', 'l','u','n','a'] ###Output _____no_output_____ ###Markdown Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โ€œInformes del Aรฑo aรฑoโ€ ###Code # -*- coding: utf-8 -* anio = 2001 while anio <= 2012: print("Informes del Aรฑo {}".format(str(anio))) anio += 1 # anio = anio + 2 # anio += 1 ###Output Informes del Aรฑo 2001 Informes del Aรฑo 2002 Informes del Aรฑo 2003 Informes del Aรฑo 2004 Informes del Aรฑo 2005 Informes del Aรฑo 2006 Informes del Aรฑo 2007 Informes del Aรฑo 2008 Informes del Aรฑo 2009 Informes del Aรฑo 2010 Informes del Aรฑo 2011 Informes del Aรฑo 2012 ###Markdown Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. 
No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.: ###Code c = 0 while c <= 5: c+=1 if c == 4: print("Rompemos el bucle cuando c vale", c) break print("c vale",c) ###Output c vale 1 c vale 2 c vale 3 Rompemos el bucle cuando c vale 4 ###Markdown - Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle. ###Code c = 0 while c <= 5: c+=1 if c==3 or c==4: # print("Continuamos con la siguiente iteraciรณn", c) continue print("c vale",c) ###Output c vale 1 c vale 2 c vale 5 c vale 6 ###Markdown Ejemplo Menรบ Interactivo ###Code print("Bienvenido al menรบ interactivo") while(True): print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir""") opcion = input() if opcion == '1': print("Hola, espero que te lo estรฉs pasando bien") elif opcion == '2': n1 = float(input("Introduce el primer nรบmero: ")) n2 = float(input("Introduce el segundo nรบmero: ")) print("El resultado de la suma es: ",n1+n2) elif opcion =='3': print("ยกHasta luego! Ha sido un placer ayudarte") break else: print("Comando desconocido, vuelve a intentarlo") ###Output Bienvenido al menรบ interactivo ยฟQuรฉ quieres hacer? 
Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir ###Markdown EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla: ###Code # Iterando sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for elemento in mi_lista: print(elemento) # Modificando valores sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for indice,nombre in enumerate(mi_lista): if nombre=='Juan': mi_lista[indice]='Maria' # valor cambiado mi_lista # Iterando sobre diccionarios dicx = {'val1':1,'val2':2,'val3':3} for key,val in dicx.items(): print(key,val) ###Output val1 1 val2 2 val3 3 ###Markdown Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha: ###Code # -*- coding: utf-8 -*- # Generarndo un rango de valores del 2001 al 2012 for anio in range(2001, 2013): print("Informes del Aรฑo", str(anio)) # por defecto el inicio de la funcion es 0 for i in range(10): print(i) ###Output 0 1 2 3 4 5 6 7 8 9 ###Markdown EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. 
NotaUsar listas ###Code cantidad=int(input("Ingrese la cantidad de numeros a ingresar:")) lista_num=[] for i in range(cantidad): num=float(input(f"Ingrese el numero n{i+1}:")) lista_num.append(num) lista_num suma_ = 0 for e in lista_num: suma_=suma_+e media=suma_/cantidad print("El promedio de la suma es :",media) # FOrma 4 i=1 suma=0 while i<= cantidad: num = float(input((f"Ingrese el numero n{i}:"))) suma=suma+ num i+=1 print("El promedio es:", suma/cantidad) ###Output Ingrese el numero n1: 8 Ingrese el numero n2: 10 Ingrese el numero n3: 4 Ingrese el numero n4: 1 ###Markdown 2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4 ###Code n = int(input("Introduce la altura del triรกngulo (entero positivo): ")) for i in range(n): print("#"*(i+1)) ###Output Introduce la altura del triรกngulo (entero positivo): 6 ###Markdown 3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. ###Code n=int(input("Ingrese un nรบmero entero:")) i=2 while n%i!=0: i+=1 if i==n: print(str(n),"es primo") else: print(str(n)," no es primo") ###Output Ingrese un nรบmero entero: 47 ###Markdown 4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista: ###Code lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o'] lista_2 = ["h",'o','l','a',' ', 'l','u','n','a'] lista_3=[] for letra in lista_1: if letra in lista_2 and letra not in lista_3: lista_3.append(letra) print(lista_3) ###Output ['h', 'o', 'l', 'a', ' ', 'u', 'n'] ###Markdown Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. 
En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โ€œInformes del Aรฑo aรฑoโ€ ###Code # -*- coding: utf-8 -* anio = 2001 while anio <= 2012: print("Informes del Aรฑo {}".format(anio)) anio += 1 # anio = anio + 2 # anio += 1 ###Output Informes del Aรฑo 2001 Informes del Aรฑo 2002 Informes del Aรฑo 2003 Informes del Aรฑo 2004 Informes del Aรฑo 2005 Informes del Aรฑo 2006 Informes del Aรฑo 2007 Informes del Aรฑo 2008 Informes del Aรฑo 2009 Informes del Aรฑo 2010 Informes del Aรฑo 2011 Informes del Aรฑo 2012 ###Markdown Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.: ###Code c = 0 while c <= 5: c+=1 if c == 4: print("Rompemos el bucle cuando c vale", c) break print("c vale",c) ###Output c vale 1 c vale 2 c vale 3 Rompemos el bucle cuando c vale 4 ###Markdown - Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle. 
###Code c = 0 while c <= 5: c+=1 if c==3 or c==4: print("Continuamos con la siguiente iteraciรณn", c) continue print("c vale",c) ###Output c vale 1 c vale 2 Continuamos con la siguiente iteraciรณn 3 Continuamos con la siguiente iteraciรณn 4 c vale 5 c vale 6 ###Markdown Ejemplo Menรบ Interactivo ###Code print("Bienvenido al menรบ interactivo") while True: print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir""") opcion = input() if opcion == '1': print("Hola, espero que te lo estรฉs pasando bien") elif opcion == '2': n1 = float(input("Introduce el primer nรบmero: ")) n2 = float(input("Introduce el segundo nรบmero: ")) print("El resultado de la suma es: ",n1+n2) elif opcion =='3': print("ยกHasta luego! Ha sido un placer ayudarte") break else: print("Comando desconocido, vuelve a intentarlo") ###Output Bienvenido al menรบ interactivo ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir ###Markdown EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta ###Code print("Bienvenido") a = int(input("Ingrese primer nรบmero: ")) b = int(input("Ingrese segundo nรบmero: ")) while True: print("""Elija un opciรณn 1. Sumar dos numeros 2. Restar dos numeros 3. Multiplicar dos numeros 4. 
Salir """) opc = input() if opc == '1': respuesta = a + b print("La suma es: ",respuesta) elif opc == '2': respuesta = a - b print("La Resta es: ",respuesta) elif opc == '3': respuesta = a * b print("La Multiplicacion es: ",respuesta) elif opc == '4': print("Hasta luego") break else: print("Ingrese un numero de la lista") while True: a = input("Ingrese primer nรบmero: ") try: a = float(a) break except: print('dato ingresado no es un numero') a ###Output _____no_output_____ ###Markdown Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla: ###Code # Iterando sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for elemento in mi_lista: print(elemento) # Modificando valores sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for indice,nombre in enumerate(mi_lista): if nombre=='Juan': mi_lista[indice]='Maria' # valor cambiado mi_lista # Iterando sobre diccionarios dicx = {'val1':1,'val2':2,'val3':3} for key,val in dicx.items(): print(key,val) cadena = 'hola mundo' for l in cadena: print(l) ###Output h o l a m u n d o ###Markdown Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha: ###Code # -*- coding: utf-8 -*- # Generarndo un rango de valores del 2001 al 2012 for anio in range(2001, 2013): print("Informes del Aรฑo", str(anio)) # por defecto el inicio de la funcion es 0 for i in range(10): print(i) ###Output 0 1 2 3 4 5 6 7 8 9 ###Markdown EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. 
NotaUsar listas ###Code cantidad = int(input('Ingrese la cantidad de numeros a ingresar: ')) lista_num = [] for i in range(cantidad): num = float(input(f'Ingrese el numero {i+1}: ')) lista_num.append(num) #lista_num len(lista_num) # cantidad de elementos suma = 0 for e in lista_num: suma = suma + e suma media_aritmetica = suma / cantidad media_aritmetica # forma 2 sum(lista_num)/cantidad # forma 3 suma = 0 for i in range(cantidad): num = float(input(f'Ingrese el numero {i+1}: ')) suma = suma + num pass # termina bucle print('el promedio es: ',suma/cantidad) # forma 4 i= 1 suma = 0 while i <= cantidad: num = float(input(f'Ingrese el numero {i}: ')) suma = suma + num i +=1 print('el promedio es: ',suma/cantidad) ###Output Ingrese el numero 1: 8 Ingrese el numero 2: 10 Ingrese el numero 3: 4 Ingrese el numero 4: 1 ###Markdown 2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. ###Code n = int(input("Altura: ")) for i in range(n): print("*"*(i+1)) ###Output Altura: 3 ###Markdown 3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. ###Code n = int(input("Introduce un nรบmero entero positivo mayor que 2: ")) i = 2 while n % i != 0: i += 1 if i == n: print(str(n) + " es primo") else: print(str(n) + " no es primo") ###Output _____no_output_____ ###Markdown 4. 
Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista: ###Code lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o'] lista_2 = ["h",'o','l','a',' ', 'l','u','n','a'] lista_3= [] for letra in lista_1: if letra in lista_2 and letra not in lista_3: lista_3.append(letra) print(lista_3) ###Output ['h', 'o', 'l', 'a', ' ', 'u', 'n'] ###Markdown Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โ€œInformes del Aรฑo aรฑoโ€ ###Code # -*- coding: utf-8 -* anio = 2001 while anio <= 2012: print("Informes del Aรฑo {}".format(anio)) anio += 1 # anio = anio + 1 # anio += 1 ###Output _____no_output_____ ###Markdown Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. 
No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.: ###Code c = 0 while c <= 5: c+=1 if c == 4: print("Rompemos el bucle cuando c vale", c) break print("c vale",c) ###Output c vale 1 c vale 2 c vale 3 Rompemos el bucle cuando c vale 4 ###Markdown - Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle. ###Code c = 0 while c <= 5: c+=1 if c==3 or c==4: print("Continuamos con la siguiente iteraciรณn", c) continue print("c vale",c) ###Output c vale 1 c vale 2 Continuamos con la siguiente iteraciรณn 3 Continuamos con la siguiente iteraciรณn 4 c vale 5 c vale 6 ###Markdown Ejemplo Menรบ Interactivo ###Code print("Bienvenido al menรบ interactivo") bandera = True while True: print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir""") opcion = input() if opcion == '1': print("Hola, espero que te lo estรฉs pasando bien")3 elif opcion == '2': n1 = float(input("Introduce el primer nรบmero: ")) n2 = float(input("Introduce el segundo nรบmero: ")) print("El resultado de la suma es: ",n1+n2) elif opcion =='3': print("ยกHasta luego! Ha sido un placer ayudarte") #bandera = False break else: print("Comando desconocido, vuelve a intentarlo") ###Output Bienvenido al menรบ interactivo ยฟQuรฉ quieres hacer? 
Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir ###Markdown EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta ###Code n1 = float(input("Digite primer nรบmero: ")) n2 = float(input("Digite segundo nรบmero: ")) while(True): print("****MENU****") print("1.- Sumar dos nรบmeros") print("2.- Restar dos nรบmeros") print("3.- Multiplicar dos nรบmeros") print("4.- Salir del programa") opcion = input("Opciรณn: ") if opcion == '1': resultado = n1 + n2 elif opcion == '2': resultado = n1-n2 elif opcion == '3': resultado =n1*n2 elif opcion =='4': break else: print("Digite una opciรณn vรกlida") continue print("El resultado es:",resultado) ###Output Digite primer nรบmero: 15 Digite segundo nรบmero: 12 ###Markdown Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla: ###Code # Iterando sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for elemento in mi_lista: print(elemento) # Modificando valores sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for indice,nombre in enumerate(mi_lista): # print(indice, nombre) if nombre == 'Juan': mi_lista[indice] = 'Maria' mi_lista # valor cambiado mi_lista # Iterando sobre diccionarios dicx = {'val1':1,'val2':2,'val3':3} for key,val in dicx.items(): print(key,val) dicx.items() # Iterando sobre strings texto = 'Hola Mundo' for i, letra in enumerate(texto): print(i, letra) texto_s = '' for l in texto: if l == 'o': texto_s += 'x' continue texto_s += l texto_s ###Output _____no_output_____ ###Markdown Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero 
no ocupa memoria porque se interpreta sobre la marcha: ###Code [*range(1,8,2)] # -*- coding: utf-8 -*- # Generarndo un rango de valores del 2001 al 2012 for anio in range(2001, 2013): print("Informes del Aรฑo", str(anio)) # por defecto el inicio de la funcion es 0 for i in range(10): print(i) text = 'hola que tal ?' for p in text: print(p) ###Output h o l a q u e t a l ? ###Markdown EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas ###Code # 1. Leer la cantidad de nรบmeros a ingresar n = int(input("Cuantos nรบmeros desea introducir? ")) n lista_numeros = [] for num in range(n): num_input = float(input("introdusca el {} nรบmero: ".format(num+1))) lista_numeros.append(num_input) lista_numeros # MEDIA ARITMETICA = suma todos numeros / cantidad de numeros media = 0 # valor inicial for numero in lista_numeros: media = media + numero media = media / n media # soluciรณn 2 sum(lista_numeros) / n ###Output _____no_output_____ ###Markdown 2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4 ###Code n = int(input("Cuantos nรบmeros desea introducir? ")) n # en python es posible multiplicar las cadenas de texto '#'*2 for i in range(1,n+1): print('#'*i) # Triangulo invertido " "*3 + "#" * 1 for i in range(1,n+1): print(" " * (n-i) + "#" * i) ###Output # ## ### #### ###Markdown 3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. 4. 
Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista: ###Code lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o'] lista_2 = ["h",'o','l','a',' ', 'l','u','n','a'] ###Output _____no_output_____ ###Markdown Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โ€œInformes del Aรฑo aรฑoโ€ ###Code # -*- coding: utf-8 -* anio = 2001 while anio <= 2012: print("Informes del Aรฑo {}".format(anio)) anio += 1 # anio = anio + 2 # anio += 1 ###Output Informes del Aรฑo 2001 Informes del Aรฑo 2002 Informes del Aรฑo 2003 Informes del Aรฑo 2004 Informes del Aรฑo 2005 Informes del Aรฑo 2006 Informes del Aรฑo 2007 Informes del Aรฑo 2008 Informes del Aรฑo 2009 Informes del Aรฑo 2010 Informes del Aรฑo 2011 Informes del Aรฑo 2012 ###Markdown Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. 
No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.: ###Code c = 0 while c <= 5: c+=1 if c == 4: print("Rompemos el bucle cuando c vale", c) break print("c vale",c) ###Output c vale 1 c vale 2 c vale 3 Rompemos el bucle cuando c vale 4 ###Markdown - Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle. ###Code c = 0 while c <= 5: c+=1 if c==3 or c==4: print("Continuamos con la siguiente iteraciรณn", c) continue print("c vale",c) ###Output c vale 1 c vale 2 Continuamos con la siguiente iteraciรณn 3 Continuamos con la siguiente iteraciรณn 4 c vale 5 c vale 6 ###Markdown Ejemplo Menรบ Interactivo ###Code print("Bienvenido al menรบ interactivo") while True: print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir""") opcion = input() if opcion == '1': print("Hola, espero que te lo estรฉs pasando bien") elif opcion == '2': n1 = float(input("Introduce el primer nรบmero: ")) n2 = float(input("Introduce el segundo nรบmero: ")) print("El resultado de la suma es: ",n1+n2) elif opcion =='3': print("ยกHasta luego! Ha sido un placer ayudarte") break else: print("Comando desconocido, vuelve a intentarlo") ###Output Bienvenido al menรบ interactivo ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir ###Markdown EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta ###Code print("Bienvenido") a = int(input("Ingrese primer nรบmero: ")) b = int(input("Ingrese segundo nรบmero: ")) while True: print("""Elija un opciรณn 1. Sumar dos numeros 2. Restar dos numeros 3. Multiplicar dos numeros 4. 
Salir """) opc = input() if opc == '1': respuesta = a + b print("La suma es: ",respuesta) elif opc == '2': respuesta = a - b print("La Resta es: ",respuesta) elif opc == '3': respuesta = a * b print("La Multiplicacion es: ",respuesta) elif opc == '4': print("Hasta luego") break else: print("Ingrese un numero de la lista") while True: a = input("Ingrese primer nรบmero: ") try: a = float(a) break except: print('dato ingresado no es un numero') a ###Output _____no_output_____ ###Markdown Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla: ###Code # Iterando sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for elemento in mi_lista: print(elemento) # Modificando valores sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for indice,nombre in enumerate(mi_lista): if nombre=='Juan': mi_lista[indice]='Maria' # valor cambiado mi_lista # Iterando sobre diccionarios dicx = {'val1':1,'val2':2,'val3':3} for key,val in dicx.items(): print(key,val) cadena = 'hola mundo' for l in cadena: print(l) ###Output h o l a m u n d o ###Markdown Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha: ###Code # -*- coding: utf-8 -*- # Generarndo un rango de valores del 2001 al 2012 for anio in range(2001, 2013): print("Informes del Aรฑo", str(anio)) # por defecto el inicio de la funcion es 0 for i in range(10): print(i) ###Output 0 1 2 3 4 5 6 7 8 9 ###Markdown EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. 
NotaUsar listas ###Code cantidad = int(input('Ingrese la cantidad de numeros a ingresar: ')) lista_num = [] for i in range(cantidad): num = float(input(f'Ingrese el numero {i+1}: ')) lista_num.append(num) #lista_num len(lista_num) # cantidad de elementos suma = 0 for e in lista_num: suma = suma + e suma media_aritmetica = suma / cantidad media_aritmetica # forma 2 sum(lista_num)/cantidad # forma 3 suma = 0 for i in range(cantidad): num = float(input(f'Ingrese el numero {i+1}: ')) suma = suma + num pass # termina bucle print('el promedio es: ',suma/cantidad) # forma 4 i= 1 suma = 0 while i <= cantidad: num = float(input(f'Ingrese el numero {i}: ')) suma = suma + num i +=1 print('el promedio es: ',suma/cantidad) ###Output Ingrese el numero 1: 8 Ingrese el numero 2: 10 Ingrese el numero 3: 4 Ingrese el numero 4: 1 ###Markdown 2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4 ###Code n = int(input("Altura: ")) for i in range(n): print("*"*(i+1)) ###Output Altura: 8 ###Markdown 3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. ###Code n = int(input("Introduce un nรบmero entero positivo mayor que 2: ")) i = 2 while n % i != 0: i += 1 if i == n: print(str(n) + " es primo") else: print(str(n) + " no es primo") ###Output _____no_output_____ ###Markdown 4. 
Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista: ###Code lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o'] lista_2 = ["h",'o','l','a',' ', 'l','u','n','a'] lista_3 = [] for letra in lista_1: if letra in lista_2 and letra not in lista_3: lista_3.append(letra) print(lista_3) ###Output ['h', 'o', 'l', 'a', ' ', 'u', 'n'] ###Markdown Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โ€œInformes del Aรฑo aรฑoโ€ ###Code # -*- coding: utf-8 -* anio = 2001 while anio <= 2012: print("Informes del Aรฑo {}".format(anio)) anio += 1 # anio = anio + 2 # anio += 1 ###Output Informes del Aรฑo 2001 Informes del Aรฑo 2002 Informes del Aรฑo 2003 Informes del Aรฑo 2004 Informes del Aรฑo 2005 Informes del Aรฑo 2006 Informes del Aรฑo 2007 Informes del Aรฑo 2008 Informes del Aรฑo 2009 Informes del Aรฑo 2010 Informes del Aรฑo 2011 Informes del Aรฑo 2012 ###Markdown Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. 
- Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.: ###Code c = 0 while c <= 5: c+=1 if c == 4: print("Rompemos el bucle cuando c vale", c) break print("c vale",c) ###Output c vale 1 c vale 2 c vale 3 Rompemos el bucle cuando c vale 4 ###Markdown - Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle. ###Code c = 0 while c <= 5: c+=1 if c==3 or c==4: print("Continuamos con la siguiente iteraciรณn", c) continue print("c vale",c) ###Output c vale 1 c vale 2 Continuamos con la siguiente iteraciรณn 3 Continuamos con la siguiente iteraciรณn 4 c vale 5 c vale 6 ###Markdown Ejemplo Menรบ Interactivo ###Code print("Bienvenido al menรบ interactivo") while True: print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir""") opcion = input() if opcion == '1': print("Hola, espero que te lo estรฉs pasando bien") elif opcion == '2': n1 = float(input("Introduce el primer nรบmero: ")) n2 = float(input("Introduce el segundo nรบmero: ")) print("El resultado de la suma es: ",n1+n2) elif opcion =='3': print("ยกHasta luego! Ha sido un placer ayudarte") break else: print("Comando desconocido, vuelve a intentarlo") ###Output Bienvenido al menรบ interactivo ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir ###Markdown EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta ###Code print("Bienvenido") a = int(input("Ingrese primer nรบmero: ")) b = int(input("Ingrese segundo nรบmero: ")) while True: print("""Elija un opciรณn 1. Sumar dos numeros 2. Restar dos numeros 3. 
Multiplicar dos numeros 4. Salir """) opc = input() if opc == '1': respuesta = a + b print("La suma es: ",respuesta) elif opc == '2': respuesta = a - b print("La Resta es: ",respuesta) elif opc == '3': respuesta = a * b print("La Multiplicacion es: ",respuesta) elif opc == '4': print("Hasta luego") break else: print("Ingrese un numero de la lista") while True: a = input("Ingrese primer nรบmero: ") try: a = float(a) break except: print('dato ingresado no es un numero') a ###Output _____no_output_____ ###Markdown Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla: ###Code # Iterando sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for elemento in mi_lista: print(elemento) # Modificando valores sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for indice,nombre in enumerate(mi_lista): if nombre=='Juan': mi_lista[indice]='Maria' # valor cambiado mi_lista # Iterando sobre diccionarios dicx = {'val1':1,'val2':2,'val3':3} for key,val in dicx.items(): print(key,val) cadena = 'hola mundo' for l in cadena: print(l) ###Output h o l a m u n d o ###Markdown Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha: ###Code # -*- coding: utf-8 -*- # Generarndo un rango de valores del 2001 al 2012 for anio in range(2001, 2013): print("Informes del Aรฑo", str(anio)) # por defecto el inicio de la funcion es 0 for i in range(10): print(i) ###Output 0 1 2 3 4 5 6 7 8 9 ###Markdown EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. 
NotaUsar listas ###Code cantidad = int(input('Ingrese la cantidad de numeros a ingresar: ')) lista_num = [] for i in range(cantidad): num = float(input(f'Ingrese el numero {i+1}: ')) lista_num.append(num) #lista_num len(lista_num) # cantidad de elementos suma = 0 for e in lista_num: suma = suma + e suma media_aritmetica = suma / cantidad media_aritmetica # forma 2 sum(lista_num)/cantidad # forma 3 suma = 0 for i in range(cantidad): num = float(input(f'Ingrese el numero {i+1}: ')) suma = suma + num pass # termina bucle print('el promedio es: ',suma/cantidad) # forma 4 i= 1 suma = 0 while i <= cantidad: num = float(input(f'Ingrese el numero {i}: ')) suma = suma + num i +=1 print('el promedio es: ',suma/cantidad) ###Output Ingrese el numero 1: 8 Ingrese el numero 2: 10 Ingrese el numero 3: 4 Ingrese el numero 4: 1 ###Markdown 2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4 ###Code n=4 '#'*5 ###Output _____no_output_____ ###Markdown 3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. 4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista: ###Code lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o'] lista_2 = ["h",'o','l','a',' ', 'l','u','n','a'] ###Output _____no_output_____ ###Markdown Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. 
Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โ€œInformes del Aรฑo aรฑoโ€ ###Code # -*- coding: utf-8 -* # para el aรฑo 2008,2009,2010 no debo entregar informe anio = 2001 while anio <= 2012: if anio in [2008,2009 ,2010]: anio += 1 continue print("Informes del Aรฑo {}".format(anio)) anio += 1 # anio = anio + 1 # anio += 1 ###Output Informes del Aรฑo 2001 Informes del Aรฑo 2002 Informes del Aรฑo 2003 Informes del Aรฑo 2004 Informes del Aรฑo 2005 Informes del Aรฑo 2006 Informes del Aรฑo 2007 Informes del Aรฑo 2011 Informes del Aรฑo 2012 ###Markdown Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. ###Code c = 0 while c <= 5: print("c vale ",c) if c == 4: print("Rompemos el bucle cuando c vale ", c) break c+=1 ###Output c vale 0 c vale 1 c vale 2 c vale 3 c vale 4 Rompemos el bucle cuando c vale 4 ###Markdown - Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle. ###Code c = 0 while c <= 5: c+=1 if c==3 or c==4: print("Continuamos con la siguiente iteraciรณn", c) continue print("c vale",c) ###Output _____no_output_____ ###Markdown Ejemplo Menรบ Interactivo ###Code print("Bienvenido al menรบ interactivo") bandera = True while True: print("""ยฟQuรฉ quieres hacer? 
Escribe una opción 1) Saludar 2) Sumar dos números 3) Salir""") opcion = input() if opcion == '1': print("Hola, espero que te lo estés pasando bien") elif opcion == '2': n1 = float(input("Introduce el primer número: ")) n2 = float(input("Introduce el segundo número: ")) print(f"El resultado de la suma es: {n1+n2}") elif opcion =='3': print("¡Hasta luego! Ha sido un placer ayudarte") #bandera = False break else: print("Comando desconocido, vuelve a intentarlo") ###Output Bienvenido al menú interactivo ¿Qué quieres hacer? Escribe una opción 1) Saludar 2) Sumar dos números 3) Salir ###Markdown EJERCICIOS Realiza un programa que lea dos números por teclado y permita elegir entre 3 opciones en un menú:- Mostrar una suma de los dos números- Mostrar una resta de los dos números (el primero menos el segundo)- Mostrar una multiplicación de los dos númerosEn caso de introducir una opción inválida, el programa informará de que no es correcta ###Code print("Bienvenido al menú interactivo") while True: print("""¿Qué quieres hacer? Escribe una opción 1) Sumar dos números 2) Restar dos números 3) Multiplicar dos números 4) Salir""") opcion = input() if opcion == '1': n1 = float(input("Introduce el primer número: ")) n2 = float(input("Introduce el segundo número: ")) print("El resultado de la suma es: ",n1+n2) elif opcion == '2': n1 = float(input("Introduce el primer número: ")) n2 = float(input("Introduce el segundo número: ")) print("El resultado de la resta es: ",n1 - n2) elif opcion =='3': n1 = float(input("Introduce el primer número: ")) n2 = float(input("Introduce el segundo número: ")) print("El resultado de la multiplicación es: ",n1 * n2) elif opcion =='4': print("¡Hasta luego! Ha sido un placer ayudarte") break else: print("Comando desconocido, vuelve a intentarlo") ###Output Bienvenido al menú interactivo ¿Qué quieres hacer? 
Escribe una opciรณn 1) Sumar dos nรบmeros 2) Restar dos nรบmeros 3) Multiplicar dos nรบmeros 4) Salir ###Markdown Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla: ###Code # Iterando sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for nombre in mi_lista: print(nombre) num1, num2 = (12,15) print(num1) print(num2) for indice,nombre in enumerate(mi_lista): print(indice, nombre) # Modificando valores sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio','Juan'] for indice,nombre in enumerate(mi_lista): # print(indice, nombre) if nombre == 'Juan': mi_lista[indice] = 'Maria' mi_lista # valor cambiado mi_lista # Iterando sobre diccionarios dicx = {'key1':1,'key2':2,'key3':3} for key,value in dicx.items(): if key == 'key1': dicx[key] = 10 dicx dicx.items() # Iterando sobre strings texto = 'Hola Mundo' for i, letra in enumerate(texto): print(i, letra) texto= 'Hola Mundo' texto texto_s = '' for l in texto: if l == 'o': texto_s += 'x' continue texto_s += l texto_s texto_s = '' for l in texto: texto_s = texto_s + l texto_s ###Output _____no_output_____ ###Markdown Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha: ###Code [*range(5,0,-2)] # -*- coding: utf-8 -*- # Generarndo un rango de valores del 2001 al 2012 for anio in range(2001, 2013): print(f"Informes del Aรฑo {anio}") # por defecto el inicio de la funcion es 0 for i in range(10): print(f'#{i}') ###Output #0 #1 #2 #3 #4 #5 #6 #7 #8 #9 ###Markdown EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas ###Code # 1. Solicitar la cantidad de numeros a introducir # 2. Por cada numero a introducir, voy a preguntar por el numero y los almaceno en una lista # 3. 
Sumar los numeros de mi lista # 4. Dividir la suma entre la cantidad # 5. Mostrar el resultado cantidad_numeros = int(input('Ingrese la cantidad de numeros a introducir: ')) cantidad_numeros lista_numeros = [] for i in range(cantidad_numeros): x = float(input(f'Ingrese el numero {i+1}: ')) lista_numeros.append(x) lista_numeros suma_lista = 0 for num in lista_numeros: suma_lista += num suma_lista sum(lista_numeros) media_aritmetica = suma_lista/cantidad_numeros print(f'La media Aritmetica de los elementos es {media_aritmetica}') ###Output La media Aritmetica de los elementos es 14.25 ###Markdown 2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4 ###Code # 1. Solicitar la altura dle triangulo # '#'*4 ###Output _____no_output_____ ###Markdown ###Code ' ' * 0 + '#' * 4 ###Output _____no_output_____ ###Markdown 3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. 4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista: ###Code lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o'] lista_2 = ["h",'o','l','a',' ', 'l','u','n','a'] ###Output _____no_output_____ ###Markdown Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. 
EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โ€œInformes del Aรฑo aรฑoโ€ ###Code # -*- coding: utf-8 -* anio = 2001 while anio <= 2012: print("Informes del Aรฑo {}".format(anio)) anio += 1 # anio = anio + 2 # anio += 1 ###Output Informes del Aรฑo 2001 Informes del Aรฑo 2002 Informes del Aรฑo 2003 Informes del Aรฑo 2004 Informes del Aรฑo 2005 Informes del Aรฑo 2006 Informes del Aรฑo 2007 Informes del Aรฑo 2008 Informes del Aรฑo 2009 Informes del Aรฑo 2010 Informes del Aรฑo 2011 Informes del Aรฑo 2012 ###Markdown Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.: ###Code c = 0 while c <= 5: c+=1 if c == 4: print("Rompemos el bucle cuando c vale", c) break print("c vale",c) ###Output c vale 1 c vale 2 c vale 3 Rompemos el bucle cuando c vale 4 ###Markdown - Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle. ###Code c = 0 while c <= 5: c+=1 #c = c+1 if c==3 or c==4: # print("Continuamos con la siguiente iteraciรณn", c) continue print("c vale",c) ###Output c vale 1 c vale 2 c vale 5 c vale 6 ###Markdown Ejemplo Menรบ Interactivo ###Code ### print("Bienvenido al menรบ interactivo") while(True): print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir""") opcion = input() if opcion == '1': print("Hola, espero que te lo estรฉs pasando bien") elif opcion == '2': n1 = float(input("Introduce el primer nรบmero: ")) n2 = float(input("Introduce el segundo nรบmero: ")) print("El resultado de la suma es: ",n1+n2) elif opcion =='3': print("ยกHasta luego! 
Ha sido un placer ayudarte") break else: print("Comando desconocido, vuelve a intentarlo") ###Output ¿Qué quieres hacer? Escribe una opción 1) Saludar 2) Sumar dos números 3) Salir ###Markdown EJERCICIOS Realiza un programa que lea dos números por teclado y permita elegir entre 3 opciones en un menú:- Mostrar una suma de los dos números- Mostrar una resta de los dos números (el primero menos el segundo)- Mostrar una multiplicación de los dos númerosEn caso de introducir una opción inválida, el programa informará de que no es correcta Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirá iterar sobre una variable compleja, del tipo lista o tupla: ###Code # Iterando sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for elemento in mi_lista: print(elemento) # Modificando valores sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for indice,nombre in enumerate(mi_lista): if nombre=='Juan': mi_lista[indice]='Maria' # valor cambiado mi_lista # Iterando sobre diccionarios dicx = {'val1':1,'val2':2,'val3':3} for key,val in dicx.items(): print(key,val) dicx.items ###Output _____no_output_____ ###Markdown Funcion Range Sirve para generar una lista de números que podemos recorrer fácilmente, pero no ocupa memoria porque se interpreta sobre la marcha: ###Code [*range(1,6,2)] ##es una especie de lista # -*- coding: utf-8 -*- # Generarndo un rango de valores del 2001 al 2012 for anio in range(2001, 2013): print("Informes del Año", str(anio)) # por defecto el inicio de la funcion es 0 for i in range(10): print(i) text = 'hola que tal' ###Output _____no_output_____ ###Markdown EJERCICIOS 1. Realiza un programa que pida al usuario cuantos números quiere introducir. Luego lee todos los números y realiza una media aritmética. 
NotaUsar listas ###Code cantidad = int(input('ingrese la cantidad de numeros a introducir')) numero = [] for n in range(cantidad): x = int(input('ingrese el numero a ser sumado: ')) numero.append(x) sum(numero)/cantidad ###Output _____no_output_____ ###Markdown 2. Escribir un programa que pida al usuario un número entero y muestre por pantalla un triángulo rectángulo como el de más abajo, de altura el número introducido. Para n = 4 ###Code q=int(input("ingrese la cantidad de #s: ")) # forma 1 for i in range(q): print('#'*(i+1)) # prueba '#'*4 # forma 2 nums=[] v=1 for elem in range(v,q+1): nums.append(v*("#")) v=v+1 for b in (nums): print (b) ###Output # ## ### #### ###Markdown 3. Escribir un programa que pida al usuario un número entero y muestre por pantalla si es un número primo o no. ###Code numero = int(input("ingrese un numero entero:")) numero primo = True for n in range(2, numero, 1): if numero % n ==0: primo = False break # 7 % 2? 7 % 3? 7 % 4? 7 % 5? 7 % 6? if primo: print(f'el numero: {numero} es primo') else: print(f'el numero: {numero} no es primo') ###Output el numero: 6 no es primo ###Markdown 4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningún elemento en la nueva lista: ###Code lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o'] lista_2 = ["h",'o','l','a',' ', 'l','u','n','a'] l1=set(lista_1) l2=set(lista_2) l1 l2 l3 = l1.intersection(l2) list(l3) ###Output _____no_output_____ ###Markdown Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (también llamadas cíclicas o bucles), nos permiten ejecutar un mismo código, de manera repetida, mientras se cumpla una condición. En Python se dispone de dos estructuras cíclicas: - El bucle while - El bucle for Las veremos en detalle a continuación. 
Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โ€œInformes del Aรฑo aรฑoโ€ ###Code # -*- coding: utf-8 -* anio = 2001 while anio <= 2012: print("Informes del Aรฑo {}".format(str(anio))) anio += 1 # anio = anio + 2 # anio += 1 ###Output Informes del Aรฑo 2001 Informes del Aรฑo 2002 Informes del Aรฑo 2003 Informes del Aรฑo 2004 Informes del Aรฑo 2005 Informes del Aรฑo 2006 Informes del Aรฑo 2007 Informes del Aรฑo 2008 Informes del Aรฑo 2009 Informes del Aรฑo 2010 Informes del Aรฑo 2011 Informes del Aรฑo 2012 ###Markdown Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.: ###Code c = 0 while c <= 5: c+=1 if c == 4: print("Rompemos el bucle cuando c vale", c) break print("c vale",c) ###Output c vale 1 c vale 2 c vale 3 Rompemos el bucle cuando c vale 4 ###Markdown - Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle. ###Code c = 0 while c <= 5: c+=1 if c==3 or c==4: # print("Continuamos con la siguiente iteraciรณn", c) continue print("c vale",c) ###Output c vale 1 c vale 2 c vale 5 c vale 6 ###Markdown Ejemplo Menรบ Interactivo ###Code print("Bienvenido al menรบ interactivo") while(True): print("""ยฟQuรฉ quieres hacer? 
Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir""") opcion = input() if opcion == '1': print("Hola, espero que te lo estรฉs pasando bien") elif opcion == '2': n1 = float(input("Introduce el primer nรบmero: ")) n2 = float(input("Introduce el segundo nรบmero: ")) print("El resultado de la suma es: ",n1+n2) elif opcion =='3': print("ยกHasta luego! Ha sido un placer ayudarte") break else: print("Comando desconocido, vuelve a intentarlo") ###Output Bienvenido al menรบ interactivo ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir ###Markdown EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta ###Code n1 = float(input("Introduce un nรบmero: ") ) n2 = float(input("Introduce otro nรบmero: ") ) opcion = 0 print(""" ยฟQuรฉ quieres hacer? 
1) Sumar los dos nรบmeros 2) Restar los dos nรบmeros 3) Multiplicar los dos nรบmeros """) opcion = int(input("Introduce un nรบmero: ") ) if opcion == 1: print("La suma de",n1,"+",n2,"es",n1+n2) elif opcion == 2: print("La resta de",n1,"-",n2,"es",n1-n2) elif opcion == 3: print("El producto de",n1,"*",n2,"es",n1*n2) else: print("Opciรณn incorrecta") ###Output _____no_output_____ ###Markdown Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla: ###Code # Iterando sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for elemento in mi_lista: print(elemento) # Modificando valores sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for indice,nombre in enumerate(mi_lista): if nombre=='Juan': mi_lista[indice]='Maria' # valor cambiado mi_lista # Iterando sobre diccionarios dicx = {'val1':1,'val2':2,'val3':3} for key,val in dicx.items(): print(key,val) ###Output val1 1 val2 2 val3 3 ###Markdown Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha: ###Code # -*- coding: utf-8 -*- # Generarndo un rango de valores del 2001 al 2012 for anio in range(2001, 2013): print("Informes del Aรฑo", str(anio)) # por defecto el inicio de la funcion es 0 for i in range(10): print(i) ###Output 0 1 2 3 4 5 6 7 8 9 ###Markdown EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas ###Code suma = 0 numeros = int(input("ยฟCuรกntos nรบmeros quieres introducir? ") ) for x in range(numeros): suma += float(input("Introduce un nรบmero: ") ) print("Se han introducido", numeros, "nรบmeros que en total han sumado", suma, "y la media es", suma/numeros) ###Output ยฟCuรกntos nรบmeros quieres introducir? 
3 Introduce un nรบmero: 1 Introduce un nรบmero: 2 Introduce un nรบmero: 3 ###Markdown 2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4 ###Code m=int(input("Ingrese un numero entero: ")) i=1 while i<=m: print("#"*i) i+=1 ###Output Ingrese un numero entero: 1 ###Markdown 3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. ###Code n = int(input("Introduce un nรบmero entero positivo mayor que 2: ")) i = 2 while n % i != 0: i += 1 if i == n: print(str(n) + " es primo") else: print(str(n) + " no es primo") ###Output _____no_output_____ ###Markdown 4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista: ###Code lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o'] lista_2 = ["h",'o','l','a',' ', 'l','u','n','a'] ###Output _____no_output_____ ###Markdown Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. 
EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โ€œInformes del Aรฑo aรฑoโ€ ###Code # -*- coding: utf-8 -* anio = 2001 while anio <= 2012: print("Informes del Aรฑo {}".format(anio)) anio += 1 # anio = anio + 2 # anio += 1 ###Output Informes del Aรฑo 2001 Informes del Aรฑo 2002 Informes del Aรฑo 2003 Informes del Aรฑo 2004 Informes del Aรฑo 2005 Informes del Aรฑo 2006 Informes del Aรฑo 2007 Informes del Aรฑo 2008 Informes del Aรฑo 2009 Informes del Aรฑo 2010 Informes del Aรฑo 2011 Informes del Aรฑo 2012 ###Markdown Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.: ###Code c = 0 while c <= 5: c+=1 if c == 4: print("Rompemos el bucle cuando c vale", c) break print("c vale",c) ###Output c vale 1 c vale 2 c vale 3 Rompemos el bucle cuando c vale 4 ###Markdown - Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle. ###Code c = 0 while c <= 5: c+=1 if c==3 or c==4: print("Continuamos con la siguiente iteraciรณn", c) continue print("c vale",c) ###Output c vale 1 c vale 2 Continuamos con la siguiente iteraciรณn 3 Continuamos con la siguiente iteraciรณn 4 c vale 5 c vale 6 ###Markdown Ejemplo Menรบ Interactivo ###Code print("Bienvenido al menรบ interactivo") while True: print("""ยฟQuรฉ quieres hacer? 
Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir""") opcion = input() if opcion == '1': print("Hola, espero que te lo estรฉs pasando bien") elif opcion == '2': n1 = float(input("Introduce el primer nรบmero: ")) n2 = float(input("Introduce el segundo nรบmero: ")) print("El resultado de la suma es: ",n1+n2) elif opcion =='3': print("ยกHasta luego! Ha sido un placer ayudarte") break else: print("Comando desconocido, vuelve a intentarlo") ###Output Bienvenido al menรบ interactivo ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir ###Markdown EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta ###Code print("Bienvenido") a = int(input("Ingrese primer nรบmero: ")) b = int(input("Ingrese segundo nรบmero: ")) while True: print("""Elija un opciรณn 1. Sumar dos numeros 2. Restar dos numeros 3. Multiplicar dos numeros 4. 
Salir """) opc = input() if opc == '1': respuesta = a + b print("La suma es: ",respuesta) elif opc == '2': respuesta = a - b print("La Resta es: ",respuesta) elif opc == '3': respuesta = a * b print("La Multiplicacion es: ",respuesta) elif opc == '4': print("Hasta luego") break else: print("Ingrese un numero de la lista") while True: a = input("Ingrese primer nรบmero: ") try: a = float(a) break except: print('dato ingresado no es un numero') a ###Output _____no_output_____ ###Markdown Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla: ###Code # Iterando sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for elemento in mi_lista: print(elemento) # Modificando valores sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for indice,nombre in enumerate(mi_lista): if nombre=='Juan': mi_lista[indice]='Maria' # valor cambiado mi_lista # Iterando sobre diccionarios dicx = {'val1':1,'val2':2,'val3':3} for key,val in dicx.items(): print(key,val) cadena = 'hola mundo' for l in cadena: print(l) ###Output h o l a m u n d o ###Markdown Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha: ###Code # -*- coding: utf-8 -*- # Generarndo un rango de valores del 2001 al 2012 for anio in range(2001, 2013): print("Informes del Aรฑo", str(anio)) # por defecto el inicio de la funcion es 0 for i in range(10): print(i) ###Output 0 1 2 3 4 5 6 7 8 9 ###Markdown EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. 
NotaUsar listas ###Code cantidad = int(input('Ingrese la cantidad de numeros a ingresar: ')) lista_num = [] for i in range(cantidad): num = float(input(f'Ingrese el numero {i+1}: ')) lista_num.append(num) #lista_num len(lista_num) # cantidad de elementos suma = 0 for e in lista_num: suma = suma + e suma media_aritmetica = suma / cantidad media_aritmetica # forma 2 sum(lista_num)/cantidad # forma 3 suma = 0 for i in range(cantidad): num = float(input(f'Ingrese el numero {i+1}: ')) suma = suma + num pass # termina bucle print('el promedio es: ',suma/cantidad) # forma 4 i= 1 suma = 0 while i <= cantidad: num = float(input(f'Ingrese el numero {i}: ')) suma = suma + num i +=1 print('el promedio es: ',suma/cantidad) ###Output Ingrese el numero 1: 8 Ingrese el numero 2: 10 Ingrese el numero 3: 4 Ingrese el numero 4: 1 ###Markdown 2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4 ###Code n=4 '#'*5 ###Output _____no_output_____ ###Markdown 3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. 4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista: ###Code lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o'] lista_2 = ["h",'o','l','a',' ', 'l','u','n','a'] ###Output _____no_output_____ ###Markdown Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. 
Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โ€œInformes del Aรฑo aรฑoโ€ ###Code # -*- coding: utf-8 -* anio = 2001 while anio <= 2012: print("Informes del Aรฑo {}".format(str(anio))) anio += 1 # anio = anio + 2 # anio += 1 ###Output Informes del Aรฑo 2001 Informes del Aรฑo 2002 Informes del Aรฑo 2003 Informes del Aรฑo 2004 Informes del Aรฑo 2005 Informes del Aรฑo 2006 Informes del Aรฑo 2007 Informes del Aรฑo 2008 Informes del Aรฑo 2009 Informes del Aรฑo 2010 Informes del Aรฑo 2011 Informes del Aรฑo 2012 ###Markdown Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.: ###Code c = 0 while c <= 5: c+=1 if c == 4: print("Rompemos el bucle cuando c vale", c) break print("c vale",c) ###Output c vale 1 c vale 2 c vale 3 Rompemos el bucle cuando c vale 4 ###Markdown - Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle. ###Code c = 0 while c <= 5: c+=1 if c==3 or c==4: # print("Continuamos con la siguiente iteraciรณn", c) continue print("c vale",c) ###Output c vale 1 c vale 2 c vale 5 c vale 6 ###Markdown Ejemplo Menรบ Interactivo ###Code print("Bienvenido al menรบ interactivo") while(True): print("""ยฟQuรฉ quieres hacer? 
Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir""") opcion = input() if opcion == '1': print("Hola, espero que te lo estรฉs pasando bien") elif opcion == '2': n1 = float(input("Introduce el primer nรบmero: ")) n2 = float(input("Introduce el segundo nรบmero: ")) print("El resultado de la suma es: ",n1+n2) elif opcion =='3': print("ยกHasta luego! Ha sido un placer ayudarte") break else: print("Comando desconocido, vuelve a intentarlo") ###Output Bienvenido al menรบ interactivo ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir ###Markdown EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta ###Code num1 = int(input('Ingresa el primer nรบmero: ')) num2 = int(input('Ingresa el segundo nรบmero: ')) while (True): print("""ยฟQuรฉ es lo que quieres hacer? 
1) Mostrar una suma de los dos nรบmeros 2) Mostrar una resta de los dos nรบmeros 3) Mostrar una multiplicaciรณn de los dos nรบmeros 4) Salir""") opcion = int(input()) if opcion == 1: print(num1 + num2) elif opcion == 2: print(num1-num2) elif opcion == 3: print(num1*num2) elif opcion == 4: print('Hasta luego') break else: print('Opciรณn invalida, vuelva a intentarlo') ###Output Ingresa el primer nรบmero: 15 Ingresa el segundo nรบmero: 2 ###Markdown Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla: ###Code # Iterando sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for elemento in mi_lista: print(elemento) # Modificando valores sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for indice,nombre in enumerate(mi_lista): if nombre=='Juan': mi_lista[indice]='Maria' # valor cambiado mi_lista # Iterando sobre diccionarios dicx = {'val1':1,'val2':2,'val3':3} for key,val in dicx.items(): print(key,val) ###Output val1 1 val2 2 val3 3 ###Markdown Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha: ###Code # -*- coding: utf-8 -*- # Generarndo un rango de valores del 2001 al 2012 for anio in range(2001, 2013): print("Informes del Aรฑo", str(anio)) # por defecto el inicio de la funcion es 0 for i in range(10): print(i) ###Output 0 1 2 3 4 5 6 7 8 9 ###Markdown EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. 
NotaUsar listas ###Code cantidad = int(input('ยฟCuรกntos nรบmeros desea introducir en la lista?')) i = 0 lista = [] suma = 0 while(i < cantidad): num = int(input(f'Ingresa el nรบmero {i+1}')) lista.append(num) i+=1 for indice, numero in enumerate(lista): suma += numero if indice == cantidad - 1: print(f'La media aritmรฉtica es {suma/cantidad}') break #forma rรกpida cantidad = int(input('Ingresa la cantidad de nรบmeros a introducir:' )) suma = 0 for i in range(cantidad): num = float(input(f'Ingresa el nรบmero {i+1}: ')) suma += num print(f'La media aritmรฉtica es: {suma/cantidad}') ###Output Ingresa la cantidad de nรบmeros a introducir: 4 Ingresa el nรบmero 1: 4 Ingresa el nรบmero 2: 3 Ingresa el nรบmero 3: 2 Ingresa el nรบmero 4: 1 ###Markdown 2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4 ###Code tamanio = int(input('Introduce la altura del triรกngulo rectรกngulo: ')) for num in range(1,tamanio+1): print('#'*num) ###Output Introduce la altura del triรกngulo rectรกngulo: 4 ###Markdown 3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. ###Code numero = int(input('Ingresa un nรบmero entero: ')) if numero == 2: print('El nรบmero es primo') elif numero > 2: for i in range(2,numero): if numero%i == 0: print('El nรบmero no es primo') break else: print('El nรบmero es primo') break else: print('El nรบmero no es primo') ###Output Ingresa un nรบmero entero: 7 ###Markdown 4. 
Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista: ###Code lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o'] lista_2 = ["h",'o','l','a',' ', 'l','u','n','a'] conjunto_1 = set(lista_1) conjunto_2 = set(lista_2) conjunto_3 = conjunto_1.intersection(conjunto_2) lista_3 = list(conjunto_3) print(lista_3) ###Output [' ', 'h', 'o', 'a', 'l', 'n', 'u'] ###Markdown Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โ€œInformes del Aรฑo aรฑoโ€ ###Code # -*- coding: utf-8 -* anio = 2001 while anio <= 2012: print("Informes del Aรฑo {}".format(str(anio))) anio += 1 # anio = anio + 2 # anio += 1 ###Output Informes del Aรฑo 2001 Informes del Aรฑo 2002 Informes del Aรฑo 2003 Informes del Aรฑo 2004 Informes del Aรฑo 2005 Informes del Aรฑo 2006 Informes del Aรฑo 2007 Informes del Aรฑo 2008 Informes del Aรฑo 2009 Informes del Aรฑo 2010 Informes del Aรฑo 2011 Informes del Aรฑo 2012 ###Markdown Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. 
- Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.: ###Code c = 0 while c <= 5: c+=1 if c == 4: print("Rompemos el bucle cuando c vale", c) break print("c vale",c) ###Output c vale 1 c vale 2 c vale 3 Rompemos el bucle cuando c vale 4 ###Markdown - Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle. ###Code c = 0 while c <= 5: c+=1 if c==3 or c==4: print("Continuamos con la siguiente iteraciรณn", c) continue print("c vale",c) c = 0 while c <= 5: c+=1 if c==3 or c==4: # print("Continuamos con la siguiente iteraciรณn", c) continue print("c vale",c) ###Output c vale 1 c vale 2 c vale 5 c vale 6 ###Markdown Ejemplo Menรบ Interactivo ###Code print("Bienvenido al menรบ interactivo") while(True): print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir""") opcion = input() if opcion == '1': print("Hola, espero que te lo estรฉs pasando bien") elif opcion == '2': n1 = float(input("Introduce el primer nรบmero: ")) n2 = float(input("Introduce el segundo nรบmero: ")) print("El resultado de la suma es: ",n1+n2) elif opcion =='3': print("ยกHasta luego! Ha sido un placer ayudarte") break else: print("Comando desconocido, vuelve a intentarlo") ###Output Bienvenido al menรบ interactivo ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir ###Markdown EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta ###Code print("Bienvenido") a = int(input("Ingrese primer nรบmero: ")) b = int(input("Ingrese segundo nรบmero: ")) while True: print("""Elija un opciรณn 1. 
Sumar dos numeros 2. Restar dos numeros 3. Multiplicar dos numeros 4. Salir """) opc = input() if opc == '1': respuesta = a + b print("La suma es: ",respuesta) elif opc == '2': respuesta = a - b print("La Resta es: ",respuesta) elif opc == '3': respuesta = a * b print("La Multiplicacion es: ",respuesta) elif opc == '4': print("Hasta luego") break else: print("Ingrese un numero de la lista") while True: a = input("Ingrese primer nรบmero: ") try: a = float(a) break except: print('dato ingresado no es un numero') a ###Output _____no_output_____ ###Markdown Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla: ###Code # Iterando sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for elemento in mi_lista: print(elemento) # Modificando valores sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for indice,nombre in enumerate(mi_lista): if nombre=='Juan': mi_lista[indice]='Maria' # valor cambiado mi_lista # Iterando sobre diccionarios dicx = {'val1':1,'val2':2,'val3':3} for key,val in dicx.items(): print(key,val) cadena = 'hola mundo' for l in cadena: print(l) ###Output h o l a m u n d o ###Markdown Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha: ###Code # -*- coding: utf-8 -*- # Generarndo un rango de valores del 2001 al 2012 for anio in range(2001, 2013): print("Informes del Aรฑo", str(anio)) # por defecto el inicio de la funcion es 0 for i in range(10): print(i) ###Output 0 1 2 3 4 5 6 7 8 9 ###Markdown EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. 
NotaUsar listas ###Code cantidad = int(input ('ingrese la cantidad de numeros que sea ingresar: ')) lista_num = [] for i in range(cantidad): num = float(input(f'Ingrese el numero {i+1}: ')) lista_num.append(num) #lista_num len(lista_num) # cantidad de elementos suma = 0 for e in lista_num: suma = suma + e suma media_aritmetica = suma / cantidad media_aritmetica # forma 2 sum(lista_num)/cantidad # forma 3 suma = 0 for i in range(cantidad): num = float(input(f'Ingrese el numero {i+1}: ')) suma = suma + num pass # termina bucle print('el promedio es: ',suma/cantidad) # forma 4 i= 1 suma = 0 while i <= cantidad: num = float(input(f'Ingrese el numero {i}: ')) suma = suma + num i +=1 print('el promedio es: ',suma/cantidad) ###Output Ingrese el numero 1: 2 Ingrese el numero 2: 3 Ingrese el numero 3: 4 Ingrese el numero 4: 5 el promedio es: 3.5 ###Markdown 2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4 ###Code numero = int(input ('ingresar numero entero: ')) for i in range(numero): for j in range(i+1): print("#", end="") print("") ###Output ingresar numero entero: 4 # ## ### #### ###Markdown 3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. ###Code numero = int(input("Introduce un nรบmero entero positivo: ")) for i in range( 2, numero): if numero % i == 0: break if (i + 1) == numero: print(str(numero) + " es primo") else: print(str(numero) + " no es primo") ###Output Introduce un nรบmero entero positivo: 4 4 no es primo ###Markdown 4. 
Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista: ###Code lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o'] lista_2 = ["h",'o','l','a',' ', 'l','u','n','a'] lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o'] lista_2 = ["h",'o','l','a',' ', 'l','u','n','a'] lista_3 = [] for letra in lista_1: if letra in lista_2 and letra not in lista_3: lista_3.append(letra) print(lista_3) ###Output ['h', 'o', 'l', 'a', ' ', 'u', 'n'] ###Markdown Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โ€œInformes del Aรฑo aรฑoโ€ ###Code # -*- coding: utf-8 -* # para el aรฑo 2008,2009,2010 no debo entregar informe # si el aรฑo es 2007 parar el bucle anio = 2001 while anio <= 2012: print("Informes del Aรฑo {}".format(anio)) anio += 1 # anio = anio + 1 # anio += 1 ###Output Informes del Aรฑo 2001 Informes del Aรฑo 2002 Informes del Aรฑo 2003 Informes del Aรฑo 2004 Informes del Aรฑo 2005 Informes del Aรฑo 2006 Informes del Aรฑo 2007 Informes del Aรฑo 2008 Informes del Aรฑo 2009 Informes del Aรฑo 2010 Informes del Aรฑo 2011 Informes del Aรฑo 2012 ###Markdown Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). 
Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. ###Code c = 0 while c <= 5: print("c vale ",c) if c == 4: print("Rompemos el bucle cuando c vale ", c) break c+=1 print("Bucle finalizado !!! ") ###Output c vale 0 c vale 1 c vale 2 c vale 3 c vale 4 Rompemos el bucle cuando c vale 4 Bucle finalizado !!! ###Markdown - Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle. ###Code c = 0 while c <= 5: c+=1 if c==3 or c==4: print("Continuamos con la siguiente iteraciรณn", c) continue print("c vale",c) ###Output c vale 1 c vale 2 Continuamos con la siguiente iteraciรณn 3 Continuamos con la siguiente iteraciรณn 4 c vale 5 c vale 6 ###Markdown Ejemplo Menรบ Interactivo ###Code print("Bienvenido al menรบ interactivo") #bandera = True while True: print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir""") opcion = input() # me devuelve un string '' if opcion == '1': print("Hola, espero que te lo estรฉs pasando bien") elif opcion == '2': n1 = float(input("Introduce el primer nรบmero: ")) n2 = float(input("Introduce el segundo nรบmero: ")) print(f"El resultado de la suma es: {n1+n2}") elif opcion =='3': print("ยกHasta luego! Ha sido un placer ayudarte") #bandera = False break else: print("Comando desconocido, vuelve a intentarlo") ###Output Bienvenido al menรบ interactivo ยฟQuรฉ quieres hacer? 
Escribe una opciรณn 1) Saludar 2) Sumar dos nรบmeros 3) Salir ###Markdown EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta ###Code print("Bienvenido al menรบ interactivo") while True: print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn 1) Sumar de nรบmeros 2) Resta de nรบmeros 3) Multiplicacion de nรบmeros 4) Salir""") opcion = input() # me devuelve un string '' if opcion == '1': n1 = float(input("Introduce el primer nรบmero: ")) n2 = float(input("Introduce el segundo nรบmero: ")) print(f"El resultado de la suma es: {n1+n2}") elif opcion == '2': n1 = float(input("Introduce el primer nรบmero: ")) n2 = float(input("Introduce el segundo nรบmero: ")) print(f"El resultado de la resta es: {n1-n2}") elif opcion =='3': n1 = float(input("Introduce el primer nรบmero: ")) n2 = float(input("Introduce el segundo nรบmero: ")) print(f"El resultado de la multiplicacion es: {n1*n2}") elif opcion =='4': print("Gracias por usar la aplicacion") break else: print("Comando desconocido, vuelve a intentarlo") ###Output _____no_output_____ ###Markdown Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla: ###Code # Iterando sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio'] for nombre in mi_lista: print(nombre) num1, num2 = [12,15] print(num1) print(num2) for i, nombre in enumerate(mi_lista): print(i, nombre) # Modificando valores sobre listas mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio','Juan'] for indice,nombre in enumerate(mi_lista): # print(indice, nombre) if nombre == 'Juan': mi_lista[indice] = 'Maria' print(mi_lista) # valor cambiado mi_lista # 
Iterando sobre diccionarios dicx = {'key1':1,'key2':2,'key3':3} for key,value in dicx.items(): if key == 'key1': dicx[key] = 10 dicx dicx.items() # dicx['key1'] = 10 # Iterando sobre strings texto = 'Hola Mundo' for i, letra in enumerate(texto): print(i, letra) texto= 'Hola Mundo' texto texto[1] texto_s = '' for l in texto: texto_s += l texto_s texto_s = '' for l in texto: if l == 'o': texto_s = texto_s + 'x' continue texto_s = texto_s + l print(texto_s) texto_s ###Output H Hxl Hxla Hxla Hxla M Hxla Mu Hxla Mun Hxla Mund ###Markdown Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha: ###Code # range -> no es un lista # si quiero convetir el range a una lista debo hacer [*range(11)] [*range(2012, 2000, -1)] # -*- coding: utf-8 -*- # Generarndo un rango de valores del 2001 al 2012 for anio in range(2001, 2013): print(f"Informes del Aรฑo {anio}") # por defecto el inicio de la funcion es 0 for i in range(10): print(f'#{i}') anio = [2001,2002,2003,2004,2005,2006] while anio <= 2012: ###Output _____no_output_____ ###Markdown EJERCICIOS----------------------------- 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas ###Code # 1. Solicitar la cantidad de nรบmeros a introducir al usuario # 2. Por cada nรบmero a introducir, solicito el numero cantidad = int(input("Ingrese la cantidad de nรบmeros a introducir: ")) cantidad lista_numeros = [] for i in range(cantidad): msg = "ingrese el numero {}".format(i +1) x = int(input(msg)) # agregando el numero a la lista lista_numeros.append(x) lista_numeros # para cada nรบmero -> realizo la suma de todos los numeros sumatoria = 0 for numero in lista_numeros: sumatoria += numero print(sumatoria) sumatoria / cantidad ###Output _____no_output_____ ###Markdown 2. 
Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4 ###Code h = int(input("Introduce la altura del triangulo: ")) h for i in range(1, h+1): print("#" * i) "#" * 0 ###Output _____no_output_____ ###Markdown ###Code h = int(input("Introduce la altura del triangulo: ")) h for i in range(1, h+1): print(" " * (h - i) + "#" * i) " #" " ##" " " * 3 + "#" * 1 " " * 2 + "#" * 2 " " * 1 + "#" * 3 " " * 0 + "#" * 4 ###Output _____no_output_____ ###Markdown 3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. ###Code numero = int(input("Introduce la altura del triangulo: ")) numero primo = True for i in range(2, numero): if numero % i ==0 : primo = False if primo: print(f"el numero {numero} es primo") else: print(f"el numero {numero} NO es primo") ###Output el numero 9 NO es primo
docs/notebooks/dispersion/two_fluid_dispersion.ipynb
###Markdown Dispersion: A Full Two Fluid Solution[tfds]: ../../api/plasmapy.dispersion.two_fluid_dispersion.two_fluid_dispersion_solution.rst[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856[stringer1963]: https://doi.org/10.1088/0368-3281/5/2/304This notebook walks through the functionalty of the [two_fluid_dispersion_solution()][tfds] function. This function computes the wave frequencies for given wavenumbers and plasma parameters based on the analytical solution presented by [Bellan 2012][bellan2012] to the [Stringer 1963][stringer1963] two fluid dispersion relation. The two fluid dispersion equaiton assumes a uniform magnetic field, a zero D.C. electric field, and low-frequency waves $\omega / k c \ll 1$ which equates to$$ \left( \cos^2 \theta - Q \frac{\omega^2}{k^2 {v_A}^2} \right) \left[ \left( \cos^2 \theta - \frac{\omega^2}{k^2 {c_s}^2} \right) - Q \frac{\omega^2}{k^2 {v_A}^2} \left( 1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \right] \\ = \left(1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \frac{\omega^2}{{\omega_{ci}}^2} \cos^2 \theta$$where$$Q = 1 + k^2 c^2/{\omega_{pe}}^2$$$$\cos \theta = \frac{k_z}{k}$$$$\mathbf{B_o} = B_{o} \mathbf{\hat{z}}$$$\omega$ is the wave frequency, $k$ is the wavenumber, $v_A$ is the Alfvรฉn velocity, $c_s$ is the sound speed, $\omega_{ci}$ is the ion gyrofrequency, and $\omega_{pe}$ is the electron plasma frequency.The approach outlined in Section 5 of [Bellan 2012][bellan2012] produces exact roots to the above dispersion equation for all three modes (fast, acoustic, and Alfvรฉn) without having to make additional approximations. 
The following dispersion relation is what the [two_fluid_dispersion_solution()][tfds] function computes.$$ \frac{\omega}{\omega_{ci}} = \sqrt{ 2 \Lambda \sqrt{-\frac{P}{3}} \cos\left( \frac{1}{3} \cos^{-1}\left( \frac{3q}{2p} \sqrt{-\frac{3}{p}} \right) - \frac{2 \pi}{3}j \right) + \frac{\Lambda A}{3} }$$where $j = 0$ represents the fast mode, $j = 1$ represents the Alfvรฉn mode, and $j = 2$ represents the acoustic mode. Additionally,$$p = \frac{3B-A^2}{3} \; , \; q = \frac{9AB-2A^3-27C}{27}$$$$A = \frac{Q + Q^2 \beta + Q \alpha + \alpha \Lambda}{Q^2} \; , \; B = \alpha \frac{1 + 2 Q \beta + \Lambda \beta}{Q^2} \; , \; C = \frac{\alpha^2 \beta}{Q^2}$$$$\alpha = \cos^2 \theta \; , \; \beta = \left( \frac{c_s}{v_A}\right)^2 \; , \; \Lambda = \left( \frac{k v_{A}}{\omega_{ci}}\right)^2$$ Contents:1. [Wave Propagating at 45 Degrees](Wave-Propagating-at-45-Degrees)2. [Wave frequencies on the k-theta plane](Wave-frequencies-on-the-k-theta-plane)3. [Reproduce Figure 1 from Bellan 2012](Reproduce-Figure-1-from-Bellan-2012) ###Code %matplotlib inline import astropy.units as u import matplotlib.pyplot as plt import numpy as np from astropy.constants.si import c from matplotlib import colors from matplotlib.ticker import MultipleLocator from mpl_toolkits.axes_grid1 import make_axes_locatable from plasmapy.dispersion.two_fluid_dispersion import two_fluid_dispersion_solution from plasmapy.formulary import parameters as pfp from plasmapy.particles import Particle plt.rcParams["figure.figsize"] = [10.5, 0.56 * 10.5] ###Output _____no_output_____ ###Markdown Wave Propagating at 45 DegreesBelow we define the required parameters to compute the wave frequencies. 
###Code # define input parameters inputs = { "k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m, "theta": 45 * u.deg, "n_i": 5 * u.cm ** -3, "B": 8.3e-9 * u.T, "T_e": 1.6e6 * u.K, "T_i": 4.0e5 * u.K, "ion": Particle("p+"), } # a few useful plasma parameters params = { "n_e": inputs["n_i"] * abs(inputs["ion"].charge_number), "cs": pfp.ion_sound_speed(inputs["T_e"], inputs["T_i"], inputs["ion"],), "va": pfp.Alfven_speed(inputs["B"], inputs["n_i"], ion=inputs["ion"],), "wci": pfp.gyrofrequency(inputs["B"], inputs["ion"]), } params["lpe"] = pfp.inertial_length(params["n_e"], "e-") params["wpe"] = pfp.plasma_frequency(params["n_e"], "e-") ###Output _____no_output_____ ###Markdown The computed wave frequencies ($rad/s$) are returned in a dictionary with keys representing the wave modes and the values being an Astropy [Quantity](https://docs.astropy.org/en/stable/api/astropy.units.Quantity.htmlastropy.units.Quantity). Since our inputs had a scalar $\theta$ and a 1D array of $k$'s, the computed wave frequencies will be a 1D array of size equal to the size of the $k$ array. ###Code # compute omegas = two_fluid_dispersion_solution(**inputs) (list(omegas.keys()), omegas["fast_mode"], omegas["fast_mode"].shape) ###Output _____no_output_____ ###Markdown Let's plot the results of each wave mode. 
###Code fs = 14 # default font size figwidth, figheight = plt.rcParams["figure.figsize"] figheight = 1.6 * figheight fig = plt.figure(figsize=[figwidth, figheight]) # normalize data k_prime = inputs["k"] * params["lpe"] # plot plt.plot( k_prime, np.real(omegas["fast_mode"] / params["wpe"]), "r.", ms=1, label="Fast", ) ax = plt.gca() ax.plot( k_prime, np.real(omegas["alfven_mode"] / params["wpe"]), "b.", ms=1, label="Alfvรจn", ) ax.plot( k_prime, np.real(omegas["acoustic_mode"] / params["wpe"]), "g.", ms=1, label="Acoustic", ) # adjust axes ax.set_xlabel(r"$kc / \omega_{pe}$", fontsize=fs) ax.set_ylabel(r"$Re(\omega / \omega_{pe})$", fontsize=fs) ax.set_yscale("log") ax.set_xscale("log") ax.set_ylim(1e-6, 2e-2) ax.tick_params( which="both", direction="in", width=1, labelsize=fs, right=True, length=5, ) # annotate text = ( f"$v_A/c_s = {params['va'] / params['cs']:.1f} \qquad " f"c/v_A = 10^{np.log10(c / params['va']):.0f} \qquad " f"\\theta = {inputs['theta'].value:.0f}" "^{\\circ}$" ) ax.text(0.25, 0.95, text, transform=ax.transAxes, fontsize=18) ax.legend(loc="upper left", markerscale=5, fontsize=fs) ###Output _____no_output_____ ###Markdown Wave frequencies on the k-theta planeLet us now look at the distribution of $\omega$ on a $k$-$\theta$ plane. 
###Code # define input parameters inputs = { "k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m, "theta": np.linspace(5, 85, 100) * u.deg, "n_i": 5 * u.cm ** -3, "B": 8.3e-9 * u.T, "T_e": 1.6e6 * u.K, "T_i": 4.0e5 * u.K, "ion": Particle("p+"), } # a few useful plasma parameters params = { "n_e": inputs["n_i"] * abs(inputs["ion"].charge_number), "cs": pfp.ion_sound_speed(inputs["T_e"], inputs["T_i"], inputs["ion"],), "va": pfp.Alfven_speed(inputs["B"], inputs["n_i"], ion=inputs["ion"],), "wci": pfp.gyrofrequency(inputs["B"], inputs["ion"]), } params["lpe"] = pfp.inertial_length(params["n_e"], "e-") params["wpe"] = pfp.plasma_frequency(params["n_e"], "e-") ###Output _____no_output_____ ###Markdown Since the $\theta$ and $k$ values are now 1-D arrays, the returned wave frequencies will be 2-D arrays with the first dimension matching the size of $k$ and the second dimension matching the size of $\theta$. ###Code # compute omegas = two_fluid_dispersion_solution(**inputs) ( omegas["fast_mode"].shape, omegas["fast_mode"].shape[0] == inputs["k"].size, omegas["fast_mode"].shape[1] == inputs["theta"].size, ) ###Output _____no_output_____ ###Markdown Let's plot (the fast mode)! 
###Code fs = 14 # default font size figwidth, figheight = plt.rcParams["figure.figsize"] figheight = 1.6 * figheight fig = plt.figure(figsize=[figwidth, figheight]) # normalize data k_prime = inputs["k"] * params["lpe"] zdata = np.transpose(np.real(omegas["fast_mode"].value)) / params["wpe"].value # plot im = plt.imshow( zdata, aspect="auto", origin="lower", extent=[ np.min(k_prime.value), np.max(k_prime.value), np.min(inputs["theta"].value), np.max(inputs["theta"].value), ], interpolation=None, cmap=plt.cm.Spectral, ) ax = plt.gca() # # adjust axes ax.set_xscale("linear") ax.set_xlabel(r"$kc/\omega_{pe}$", fontsize=fs) ax.set_ylabel(r"$\theta$ [$deg.$]", fontsize=fs) ax.tick_params( which="both", direction="in", width=2, labelsize=fs, right=True, top=True, length=10, ) # Add colorbar divider = make_axes_locatable(ax) cax = divider.append_axes("top", size="5%", pad=0.07) cbar = plt.colorbar( im, cax=cax, orientation="horizontal", ticks=None, fraction=0.05, pad=0.0, ) cbar.ax.tick_params( axis="x", direction="in", width=2, length=10, top=True, bottom=False, labelsize=fs, pad=0.0, labeltop=True, labelbottom=False, ) cbar.ax.xaxis.set_label_position("top") cbar.set_label(r"$\omega/\omega_{pe}$", fontsize=fs, labelpad=8) ###Output _____no_output_____ ###Markdown Reproduce Figure 1 from Bellan 2012[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856Figure 1 of [Bellan 2012][bellan2012] chooses parameters such that $\beta = 0.4$ and $\Lambda=0.4$. Below we define parameters to approximate Bellan's assumptions. 
###Code # define input parameters inputs = { "B": 400e-4 * u.T, "ion": Particle("He+"), "n_i": 6.358e19 * u.m ** -3, "T_e": 20 * u.eV, "T_i": 10 * u.eV, "theta": np.linspace(0, 90) * u.deg, "k": (2 * np.pi * u.rad) / (0.56547 * u.m), } # a few useful plasma parameters params = { "n_e": inputs["n_i"] * abs(inputs["ion"].charge_number), "cs": pfp.cs_(inputs["T_e"], inputs["T_i"], inputs["ion"]), "wci": pfp.wc_(inputs["B"], inputs["ion"]), "va": pfp.va_(inputs["B"], inputs["n_i"], ion=inputs["ion"]), } params["beta"] = (params["cs"] / params["va"]).value ** 2 params["wpe"] = pfp.wp_(params["n_e"], "e-") params["Lambda"] = (inputs["k"] * params["va"] / params["wci"]).value ** 2 (params["beta"], params["Lambda"]) # compute omegas = two_fluid_dispersion_solution(**inputs) # generate data for plots plt_vals = {} for mode, arr in omegas.items(): norm = (np.absolute(arr) / (inputs["k"] * params["va"])).value ** 2 plt_vals[mode] = { "x": norm * np.sin(inputs["theta"].to(u.rad).value), "y": norm * np.cos(inputs["theta"].to(u.rad).value), } fs = 14 # default font size figwidth, figheight = plt.rcParams["figure.figsize"] figheight = 1.6 * figheight fig = plt.figure(figsize=[figwidth, figheight]) # Fast mode plt.plot( plt_vals["fast_mode"]["x"], plt_vals["fast_mode"]["y"], linewidth=2, label="Fast", ) ax = plt.gca() # adjust axes ax.set_xlabel(r"$(\omega / k v_A)^2 \, \sin \theta$", fontsize=fs) ax.set_ylabel(r"$(\omega / k v_A)^2 \, \cos \theta$", fontsize=fs) ax.set_xlim(0.0, 1.5) ax.set_ylim(0.0, 2.0) for spine in ax.spines.values(): spine.set_linewidth(2) ax.minorticks_on() ax.tick_params(which="both", labelsize=fs, width=2) ax.tick_params(which="major", length=10) ax.tick_params(which="minor", length=5) ax.xaxis.set_major_locator(MultipleLocator(0.5)) ax.xaxis.set_minor_locator(MultipleLocator(0.1)) ax.yaxis.set_major_locator(MultipleLocator(0.5)) ax.yaxis.set_minor_locator(MultipleLocator(0.1)) # Alfven mode plt.plot( plt_vals["alfven_mode"]["x"], 
plt_vals["alfven_mode"]["y"], linewidth=2, label="Alfv$\`{e}$n", ) # Acoustic mode plt.plot( plt_vals["acoustic_mode"]["x"], plt_vals["acoustic_mode"]["y"], linewidth=2, label="Acoustic", ) # annotations plt.legend(fontsize=fs, loc="upper right") ###Output _____no_output_____ ###Markdown Dispersion: A Full Two Fluid Solution[tfds]: ../../api/plasmapy.dispersion.analytical.two_fluid_.two_fluid.rst[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856[stringer1963]: https://doi.org/10.1088/0368-3281/5/2/304This notebook walks through the functionalty of the [two_fluid()][tfds] function. This function computes the wave frequencies for given wavenumbers and plasma parameters based on the analytical solution presented by [Bellan 2012][bellan2012] to the [Stringer 1963][stringer1963] two fluid dispersion relation. The two fluid dispersion equaiton assumes a uniform magnetic field, a zero D.C. electric field, and low-frequency waves $\omega / k c \ll 1$ which equates to$$ \left( \cos^2 \theta - Q \frac{\omega^2}{k^2 {v_A}^2} \right) \left[ \left( \cos^2 \theta - \frac{\omega^2}{k^2 {c_s}^2} \right) - Q \frac{\omega^2}{k^2 {v_A}^2} \left( 1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \right] \\ = \left(1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \frac{\omega^2}{{\omega_{ci}}^2} \cos^2 \theta$$where$$Q = 1 + k^2 c^2/{\omega_{pe}}^2$$$$\cos \theta = \frac{k_z}{k}$$$$\mathbf{B_o} = B_{o} \mathbf{\hat{z}}$$$\omega$ is the wave frequency, $k$ is the wavenumber, $v_A$ is the Alfvรฉn velocity, $c_s$ is the sound speed, $\omega_{ci}$ is the ion gyrofrequency, and $\omega_{pe}$ is the electron plasma frequency.The approach outlined in Section 5 of [Bellan 2012][bellan2012] produces exact roots to the above dispersion equation for all three modes (fast, acoustic, and Alfvรฉn) without having to make additional approximations. 
The following dispersion relation is what the [two_fluid()][tfds] function computes.$$ \frac{\omega}{\omega_{ci}} = \sqrt{ 2 \Lambda \sqrt{-\frac{P}{3}} \cos\left( \frac{1}{3} \cos^{-1}\left( \frac{3q}{2p} \sqrt{-\frac{3}{p}} \right) - \frac{2 \pi}{3}j \right) + \frac{\Lambda A}{3} }$$where $j = 0$ represents the fast mode, $j = 1$ represents the Alfvรฉn mode, and $j = 2$ represents the acoustic mode. Additionally,$$p = \frac{3B-A^2}{3} \; , \; q = \frac{9AB-2A^3-27C}{27}$$$$A = \frac{Q + Q^2 \beta + Q \alpha + \alpha \Lambda}{Q^2} \; , \; B = \alpha \frac{1 + 2 Q \beta + \Lambda \beta}{Q^2} \; , \; C = \frac{\alpha^2 \beta}{Q^2}$$$$\alpha = \cos^2 \theta \; , \; \beta = \left( \frac{c_s}{v_A}\right)^2 \; , \; \Lambda = \left( \frac{k v_{A}}{\omega_{ci}}\right)^2$$ Contents:1. [Wave Propagating at 45 Degrees](Wave-Propagating-at-45-Degrees)2. [Wave frequencies on the k-theta plane](Wave-frequencies-on-the-k-theta-plane)3. [Reproduce Figure 1 from Bellan 2012](Reproduce-Figure-1-from-Bellan-2012) ###Code %matplotlib inline import astropy.units as u import matplotlib.pyplot as plt import numpy as np from astropy.constants.si import c from matplotlib import colors from matplotlib.ticker import MultipleLocator from mpl_toolkits.axes_grid1 import make_axes_locatable from plasmapy.dispersion.analytical.two_fluid_ import two_fluid from plasmapy.formulary import parameters as pfp from plasmapy.particles import Particle plt.rcParams["figure.figsize"] = [10.5, 0.56 * 10.5] ###Output _____no_output_____ ###Markdown Wave Propagating at 45 DegreesBelow we define the required parameters to compute the wave frequencies. 
###Code # define input parameters inputs = { "k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m, "theta": 45 * u.deg, "n_i": 5 * u.cm ** -3, "B": 8.3e-9 * u.T, "T_e": 1.6e6 * u.K, "T_i": 4.0e5 * u.K, "ion": Particle("p+"), } # a few useful plasma parameters params = { "n_e": inputs["n_i"] * abs(inputs["ion"].charge_number), "cs": pfp.ion_sound_speed(inputs["T_e"], inputs["T_i"], inputs["ion"],), "va": pfp.Alfven_speed(inputs["B"], inputs["n_i"], ion=inputs["ion"],), "wci": pfp.gyrofrequency(inputs["B"], inputs["ion"]), } params["lpe"] = pfp.inertial_length(params["n_e"], "e-") params["wpe"] = pfp.plasma_frequency(params["n_e"], "e-") ###Output _____no_output_____ ###Markdown The computed wave frequencies ($rad/s$) are returned in a dictionary with keys representing the wave modes and the values being an Astropy [Quantity](https://docs.astropy.org/en/stable/api/astropy.units.Quantity.htmlastropy.units.Quantity). Since our inputs had a scalar $\theta$ and a 1D array of $k$'s, the computed wave frequencies will be a 1D array of size equal to the size of the $k$ array. ###Code # compute omegas = two_fluid(**inputs) (list(omegas.keys()), omegas["fast_mode"], omegas["fast_mode"].shape) ###Output _____no_output_____ ###Markdown Let's plot the results of each wave mode. 
###Code fs = 14 # default font size figwidth, figheight = plt.rcParams["figure.figsize"] figheight = 1.6 * figheight fig = plt.figure(figsize=[figwidth, figheight]) # normalize data k_prime = inputs["k"] * params["lpe"] # plot plt.plot( k_prime, np.real(omegas["fast_mode"] / params["wpe"]), "r.", ms=1, label="Fast", ) ax = plt.gca() ax.plot( k_prime, np.real(omegas["alfven_mode"] / params["wpe"]), "b.", ms=1, label="Alfvรจn", ) ax.plot( k_prime, np.real(omegas["acoustic_mode"] / params["wpe"]), "g.", ms=1, label="Acoustic", ) # adjust axes ax.set_xlabel(r"$kc / \omega_{pe}$", fontsize=fs) ax.set_ylabel(r"$Re(\omega / \omega_{pe})$", fontsize=fs) ax.set_yscale("log") ax.set_xscale("log") ax.set_ylim(1e-6, 2e-2) ax.tick_params( which="both", direction="in", width=1, labelsize=fs, right=True, length=5, ) # annotate text = ( f"$v_A/c_s = {params['va'] / params['cs']:.1f} \qquad " f"c/v_A = 10^{np.log10(c / params['va']):.0f} \qquad " f"\\theta = {inputs['theta'].value:.0f}" "^{\\circ}$" ) ax.text(0.25, 0.95, text, transform=ax.transAxes, fontsize=18) ax.legend(loc="upper left", markerscale=5, fontsize=fs) ###Output _____no_output_____ ###Markdown Wave frequencies on the k-theta planeLet us now look at the distribution of $\omega$ on a $k$-$\theta$ plane. 
###Code # define input parameters inputs = { "k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m, "theta": np.linspace(5, 85, 100) * u.deg, "n_i": 5 * u.cm ** -3, "B": 8.3e-9 * u.T, "T_e": 1.6e6 * u.K, "T_i": 4.0e5 * u.K, "ion": Particle("p+"), } # a few useful plasma parameters params = { "n_e": inputs["n_i"] * abs(inputs["ion"].charge_number), "cs": pfp.ion_sound_speed(inputs["T_e"], inputs["T_i"], inputs["ion"],), "va": pfp.Alfven_speed(inputs["B"], inputs["n_i"], ion=inputs["ion"],), "wci": pfp.gyrofrequency(inputs["B"], inputs["ion"]), } params["lpe"] = pfp.inertial_length(params["n_e"], "e-") params["wpe"] = pfp.plasma_frequency(params["n_e"], "e-") ###Output _____no_output_____ ###Markdown Since the $\theta$ and $k$ values are now 1-D arrays, the returned wave frequencies will be 2-D arrays with the first dimension matching the size of $k$ and the second dimension matching the size of $\theta$. ###Code # compute omegas = two_fluid(**inputs) ( omegas["fast_mode"].shape, omegas["fast_mode"].shape[0] == inputs["k"].size, omegas["fast_mode"].shape[1] == inputs["theta"].size, ) ###Output _____no_output_____ ###Markdown Let's plot (the fast mode)! 
###Code fs = 14 # default font size figwidth, figheight = plt.rcParams["figure.figsize"] figheight = 1.6 * figheight fig = plt.figure(figsize=[figwidth, figheight]) # normalize data k_prime = inputs["k"] * params["lpe"] zdata = np.transpose(np.real(omegas["fast_mode"].value)) / params["wpe"].value # plot im = plt.imshow( zdata, aspect="auto", origin="lower", extent=[ np.min(k_prime.value), np.max(k_prime.value), np.min(inputs["theta"].value), np.max(inputs["theta"].value), ], interpolation=None, cmap=plt.cm.Spectral, ) ax = plt.gca() # # adjust axes ax.set_xscale("linear") ax.set_xlabel(r"$kc/\omega_{pe}$", fontsize=fs) ax.set_ylabel(r"$\theta$ [$deg.$]", fontsize=fs) ax.tick_params( which="both", direction="in", width=2, labelsize=fs, right=True, top=True, length=10, ) # Add colorbar divider = make_axes_locatable(ax) cax = divider.append_axes("top", size="5%", pad=0.07) cbar = plt.colorbar( im, cax=cax, orientation="horizontal", ticks=None, fraction=0.05, pad=0.0, ) cbar.ax.tick_params( axis="x", direction="in", width=2, length=10, top=True, bottom=False, labelsize=fs, pad=0.0, labeltop=True, labelbottom=False, ) cbar.ax.xaxis.set_label_position("top") cbar.set_label(r"$\omega/\omega_{pe}$", fontsize=fs, labelpad=8) ###Output _____no_output_____ ###Markdown Reproduce Figure 1 from Bellan 2012[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856Figure 1 of [Bellan 2012][bellan2012] chooses parameters such that $\beta = 0.4$ and $\Lambda=0.4$. Below we define parameters to approximate Bellan's assumptions. 
###Code # define input parameters inputs = { "B": 400e-4 * u.T, "ion": Particle("He+"), "n_i": 6.358e19 * u.m ** -3, "T_e": 20 * u.eV, "T_i": 10 * u.eV, "theta": np.linspace(0, 90) * u.deg, "k": (2 * np.pi * u.rad) / (0.56547 * u.m), } # a few useful plasma parameters params = { "n_e": inputs["n_i"] * abs(inputs["ion"].charge_number), "cs": pfp.cs_(inputs["T_e"], inputs["T_i"], inputs["ion"]), "wci": pfp.wc_(inputs["B"], inputs["ion"]), "va": pfp.va_(inputs["B"], inputs["n_i"], ion=inputs["ion"]), } params["beta"] = (params["cs"] / params["va"]).value ** 2 params["wpe"] = pfp.wp_(params["n_e"], "e-") params["Lambda"] = (inputs["k"] * params["va"] / params["wci"]).value ** 2 (params["beta"], params["Lambda"]) # compute omegas = two_fluid(**inputs) # generate data for plots plt_vals = {} for mode, arr in omegas.items(): norm = (np.absolute(arr) / (inputs["k"] * params["va"])).value ** 2 plt_vals[mode] = { "x": norm * np.sin(inputs["theta"].to(u.rad).value), "y": norm * np.cos(inputs["theta"].to(u.rad).value), } fs = 14 # default font size figwidth, figheight = plt.rcParams["figure.figsize"] figheight = 1.6 * figheight fig = plt.figure(figsize=[figwidth, figheight]) # Fast mode plt.plot( plt_vals["fast_mode"]["x"], plt_vals["fast_mode"]["y"], linewidth=2, label="Fast", ) ax = plt.gca() # adjust axes ax.set_xlabel(r"$(\omega / k v_A)^2 \, \sin \theta$", fontsize=fs) ax.set_ylabel(r"$(\omega / k v_A)^2 \, \cos \theta$", fontsize=fs) ax.set_xlim(0.0, 1.5) ax.set_ylim(0.0, 2.0) for spine in ax.spines.values(): spine.set_linewidth(2) ax.minorticks_on() ax.tick_params(which="both", labelsize=fs, width=2) ax.tick_params(which="major", length=10) ax.tick_params(which="minor", length=5) ax.xaxis.set_major_locator(MultipleLocator(0.5)) ax.xaxis.set_minor_locator(MultipleLocator(0.1)) ax.yaxis.set_major_locator(MultipleLocator(0.5)) ax.yaxis.set_minor_locator(MultipleLocator(0.1)) # Alfven mode plt.plot( plt_vals["alfven_mode"]["x"], plt_vals["alfven_mode"]["y"], linewidth=2, 
label="Alfv$\`{e}$n", ) # Acoustic mode plt.plot( plt_vals["acoustic_mode"]["x"], plt_vals["acoustic_mode"]["y"], linewidth=2, label="Acoustic", ) # annotations plt.legend(fontsize=fs, loc="upper right") ###Output _____no_output_____ ###Markdown Dispersion: A Full Two Fluid Solution[tfds]: ../../api/plasmapy.dispersion.analytical.two_fluid_.two_fluid.rst[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856[stringer1963]: https://doi.org/10.1088/0368-3281/5/2/304This notebook walks through the functionalty of the [two_fluid()][tfds] function. This function computes the wave frequencies for given wavenumbers and plasma parameters based on the analytical solution presented by [Bellan 2012][bellan2012] to the [Stringer 1963][stringer1963] two fluid dispersion relation. The two fluid dispersion equaiton assumes a uniform magnetic field, a zero D.C. electric field, and low-frequency waves $\omega / k c \ll 1$ which equates to$$ \left( \cos^2 \theta - Q \frac{\omega^2}{k^2 {v_A}^2} \right) \left[ \left( \cos^2 \theta - \frac{\omega^2}{k^2 {c_s}^2} \right) - Q \frac{\omega^2}{k^2 {v_A}^2} \left( 1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \right] \\ = \left(1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \frac{\omega^2}{{\omega_{ci}}^2} \cos^2 \theta$$where$$Q = 1 + k^2 c^2/{\omega_{pe}}^2$$$$\cos \theta = \frac{k_z}{k}$$$$\mathbf{B_o} = B_{o} \mathbf{\hat{z}}$$$\omega$ is the wave frequency, $k$ is the wavenumber, $v_A$ is the Alfvรฉn velocity, $c_s$ is the sound speed, $\omega_{ci}$ is the ion gyrofrequency, and $\omega_{pe}$ is the electron plasma frequency.The approach outlined in Section 5 of [Bellan 2012][bellan2012] produces exact roots to the above dispersion equation for all three modes (fast, acoustic, and Alfvรฉn) without having to make additional approximations. 
The following dispersion relation is what the [two_fluid()][tfds] function computes.$$ \frac{\omega}{\omega_{ci}} = \sqrt{ 2 \Lambda \sqrt{-\frac{P}{3}} \cos\left( \frac{1}{3} \cos^{-1}\left( \frac{3q}{2p} \sqrt{-\frac{3}{p}} \right) - \frac{2 \pi}{3}j \right) + \frac{\Lambda A}{3} }$$where $j = 0$ represents the fast mode, $j = 1$ represents the Alfvรฉn mode, and $j = 2$ represents the acoustic mode. Additionally,$$p = \frac{3B-A^2}{3} \; , \; q = \frac{9AB-2A^3-27C}{27}$$$$A = \frac{Q + Q^2 \beta + Q \alpha + \alpha \Lambda}{Q^2} \; , \; B = \alpha \frac{1 + 2 Q \beta + \Lambda \beta}{Q^2} \; , \; C = \frac{\alpha^2 \beta}{Q^2}$$$$\alpha = \cos^2 \theta \; , \; \beta = \left( \frac{c_s}{v_A}\right)^2 \; , \; \Lambda = \left( \frac{k v_{A}}{\omega_{ci}}\right)^2$$ Contents:1. [Wave Propagating at 45 Degrees](Wave-Propagating-at-45-Degrees)2. [Wave frequencies on the k-theta plane](Wave-frequencies-on-the-k-theta-plane)3. [Reproduce Figure 1 from Bellan 2012](Reproduce-Figure-1-from-Bellan-2012) ###Code %matplotlib inline import astropy.units as u import matplotlib.pyplot as plt import numpy as np from astropy.constants.si import c from matplotlib import colors from matplotlib.ticker import MultipleLocator from mpl_toolkits.axes_grid1 import make_axes_locatable from plasmapy.dispersion.analytical.two_fluid_ import two_fluid from plasmapy.formulary import parameters as pfp from plasmapy.particles import Particle plt.rcParams["figure.figsize"] = [10.5, 0.56 * 10.5] ###Output _____no_output_____ ###Markdown Wave Propagating at 45 DegreesBelow we define the required parameters to compute the wave frequencies. 
###Code # define input parameters inputs = { "k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m, "theta": 45 * u.deg, "n_i": 5 * u.cm ** -3, "B": 8.3e-9 * u.T, "T_e": 1.6e6 * u.K, "T_i": 4.0e5 * u.K, "ion": Particle("p+"), } # a few useful plasma parameters params = { "n_e": inputs["n_i"] * abs(inputs["ion"].charge_number), "cs": pfp.ion_sound_speed( inputs["T_e"], inputs["T_i"], inputs["ion"], ), "va": pfp.Alfven_speed( inputs["B"], inputs["n_i"], ion=inputs["ion"], ), "wci": pfp.gyrofrequency(inputs["B"], inputs["ion"]), } params["lpe"] = pfp.inertial_length(params["n_e"], "e-") params["wpe"] = pfp.plasma_frequency(params["n_e"], "e-") ###Output _____no_output_____ ###Markdown The computed wave frequencies ($rad/s$) are returned in a dictionary with keys representing the wave modes and the values being an Astropy [Quantity](https://docs.astropy.org/en/stable/api/astropy.units.Quantity.htmlastropy.units.Quantity). Since our inputs had a scalar $\theta$ and a 1D array of $k$'s, the computed wave frequencies will be a 1D array of size equal to the size of the $k$ array. ###Code # compute omegas = two_fluid(**inputs) (list(omegas.keys()), omegas["fast_mode"], omegas["fast_mode"].shape) ###Output _____no_output_____ ###Markdown Let's plot the results of each wave mode. 
###Code fs = 14 # default font size figwidth, figheight = plt.rcParams["figure.figsize"] figheight = 1.6 * figheight fig = plt.figure(figsize=[figwidth, figheight]) # normalize data k_prime = inputs["k"] * params["lpe"] # plot plt.plot( k_prime, np.real(omegas["fast_mode"] / params["wpe"]), "r.", ms=1, label="Fast", ) ax = plt.gca() ax.plot( k_prime, np.real(omegas["alfven_mode"] / params["wpe"]), "b.", ms=1, label="Alfvรจn", ) ax.plot( k_prime, np.real(omegas["acoustic_mode"] / params["wpe"]), "g.", ms=1, label="Acoustic", ) # adjust axes ax.set_xlabel(r"$kc / \omega_{pe}$", fontsize=fs) ax.set_ylabel(r"$Re(\omega / \omega_{pe})$", fontsize=fs) ax.set_yscale("log") ax.set_xscale("log") ax.set_ylim(1e-6, 2e-2) ax.tick_params( which="both", direction="in", width=1, labelsize=fs, right=True, length=5, ) # annotate text = ( f"$v_A/c_s = {params['va'] / params['cs']:.1f} \qquad " f"c/v_A = 10^{np.log10(c / params['va']):.0f} \qquad " f"\\theta = {inputs['theta'].value:.0f}" "^{\\circ}$" ) ax.text(0.25, 0.95, text, transform=ax.transAxes, fontsize=18) ax.legend(loc="upper left", markerscale=5, fontsize=fs) ###Output _____no_output_____ ###Markdown Wave frequencies on the k-theta planeLet us now look at the distribution of $\omega$ on a $k$-$\theta$ plane. 
###Code # define input parameters inputs = { "k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m, "theta": np.linspace(5, 85, 100) * u.deg, "n_i": 5 * u.cm ** -3, "B": 8.3e-9 * u.T, "T_e": 1.6e6 * u.K, "T_i": 4.0e5 * u.K, "ion": Particle("p+"), } # a few useful plasma parameters params = { "n_e": inputs["n_i"] * abs(inputs["ion"].charge_number), "cs": pfp.ion_sound_speed( inputs["T_e"], inputs["T_i"], inputs["ion"], ), "va": pfp.Alfven_speed( inputs["B"], inputs["n_i"], ion=inputs["ion"], ), "wci": pfp.gyrofrequency(inputs["B"], inputs["ion"]), } params["lpe"] = pfp.inertial_length(params["n_e"], "e-") params["wpe"] = pfp.plasma_frequency(params["n_e"], "e-") ###Output _____no_output_____ ###Markdown Since the $\theta$ and $k$ values are now 1-D arrays, the returned wave frequencies will be 2-D arrays with the first dimension matching the size of $k$ and the second dimension matching the size of $\theta$. ###Code # compute omegas = two_fluid(**inputs) ( omegas["fast_mode"].shape, omegas["fast_mode"].shape[0] == inputs["k"].size, omegas["fast_mode"].shape[1] == inputs["theta"].size, ) ###Output _____no_output_____ ###Markdown Let's plot (the fast mode)! 
###Code fs = 14 # default font size figwidth, figheight = plt.rcParams["figure.figsize"] figheight = 1.6 * figheight fig = plt.figure(figsize=[figwidth, figheight]) # normalize data k_prime = inputs["k"] * params["lpe"] zdata = np.transpose(np.real(omegas["fast_mode"].value)) / params["wpe"].value # plot im = plt.imshow( zdata, aspect="auto", origin="lower", extent=[ np.min(k_prime.value), np.max(k_prime.value), np.min(inputs["theta"].value), np.max(inputs["theta"].value), ], interpolation=None, cmap=plt.cm.Spectral, ) ax = plt.gca() # # adjust axes ax.set_xscale("linear") ax.set_xlabel(r"$kc/\omega_{pe}$", fontsize=fs) ax.set_ylabel(r"$\theta$ [$deg.$]", fontsize=fs) ax.tick_params( which="both", direction="in", width=2, labelsize=fs, right=True, top=True, length=10, ) # Add colorbar divider = make_axes_locatable(ax) cax = divider.append_axes("top", size="5%", pad=0.07) cbar = plt.colorbar( im, cax=cax, orientation="horizontal", ticks=None, fraction=0.05, pad=0.0, ) cbar.ax.tick_params( axis="x", direction="in", width=2, length=10, top=True, bottom=False, labelsize=fs, pad=0.0, labeltop=True, labelbottom=False, ) cbar.ax.xaxis.set_label_position("top") cbar.set_label(r"$\omega/\omega_{pe}$", fontsize=fs, labelpad=8) ###Output _____no_output_____ ###Markdown Reproduce Figure 1 from Bellan 2012[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856Figure 1 of [Bellan 2012][bellan2012] chooses parameters such that $\beta = 0.4$ and $\Lambda=0.4$. Below we define parameters to approximate Bellan's assumptions. 
###Code # define input parameters inputs = { "B": 400e-4 * u.T, "ion": Particle("He+"), "n_i": 6.358e19 * u.m ** -3, "T_e": 20 * u.eV, "T_i": 10 * u.eV, "theta": np.linspace(0, 90) * u.deg, "k": (2 * np.pi * u.rad) / (0.56547 * u.m), } # a few useful plasma parameters params = { "n_e": inputs["n_i"] * abs(inputs["ion"].charge_number), "cs": pfp.cs_(inputs["T_e"], inputs["T_i"], inputs["ion"]), "wci": pfp.wc_(inputs["B"], inputs["ion"]), "va": pfp.va_(inputs["B"], inputs["n_i"], ion=inputs["ion"]), } params["beta"] = (params["cs"] / params["va"]).value ** 2 params["wpe"] = pfp.wp_(params["n_e"], "e-") params["Lambda"] = (inputs["k"] * params["va"] / params["wci"]).value ** 2 (params["beta"], params["Lambda"]) # compute omegas = two_fluid(**inputs) # generate data for plots plt_vals = {} for mode, arr in omegas.items(): norm = (np.absolute(arr) / (inputs["k"] * params["va"])).value ** 2 plt_vals[mode] = { "x": norm * np.sin(inputs["theta"].to(u.rad).value), "y": norm * np.cos(inputs["theta"].to(u.rad).value), } fs = 14 # default font size figwidth, figheight = plt.rcParams["figure.figsize"] figheight = 1.6 * figheight fig = plt.figure(figsize=[figwidth, figheight]) # Fast mode plt.plot( plt_vals["fast_mode"]["x"], plt_vals["fast_mode"]["y"], linewidth=2, label="Fast", ) ax = plt.gca() # adjust axes ax.set_xlabel(r"$(\omega / k v_A)^2 \, \sin \theta$", fontsize=fs) ax.set_ylabel(r"$(\omega / k v_A)^2 \, \cos \theta$", fontsize=fs) ax.set_xlim(0.0, 1.5) ax.set_ylim(0.0, 2.0) for spine in ax.spines.values(): spine.set_linewidth(2) ax.minorticks_on() ax.tick_params(which="both", labelsize=fs, width=2) ax.tick_params(which="major", length=10) ax.tick_params(which="minor", length=5) ax.xaxis.set_major_locator(MultipleLocator(0.5)) ax.xaxis.set_minor_locator(MultipleLocator(0.1)) ax.yaxis.set_major_locator(MultipleLocator(0.5)) ax.yaxis.set_minor_locator(MultipleLocator(0.1)) # Alfven mode plt.plot( plt_vals["alfven_mode"]["x"], plt_vals["alfven_mode"]["y"], linewidth=2, 
label="Alfv$\`{e}$n", ) # Acoustic mode plt.plot( plt_vals["acoustic_mode"]["x"], plt_vals["acoustic_mode"]["y"], linewidth=2, label="Acoustic", ) # annotations plt.legend(fontsize=fs, loc="upper right") ###Output _____no_output_____ ###Markdown Dispersion: A Full Two Fluid Solution[tfds]: ../../api/plasmapy.dispersion.analytical.two_fluid_.two_fluid.rst[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856[stringer1963]: https://doi.org/10.1088/0368-3281/5/2/304This notebook walks through the functionality of the [two_fluid()][tfds] function. This function computes the wave frequencies for given wavenumbers and plasma parameters based on the analytical solution presented by [Bellan 2012][bellan2012] to the [Stringer 1963][stringer1963] two fluid dispersion relation. The two fluid dispersion equaiton assumes a uniform magnetic field, a zero D.C. electric field, and low-frequency waves $\omega / k c \ll 1$ which equates to$$ \left( \cos^2 \theta - Q \frac{\omega^2}{k^2 {v_A}^2} \right) \left[ \left( \cos^2 \theta - \frac{\omega^2}{k^2 {c_s}^2} \right) - Q \frac{\omega^2}{k^2 {v_A}^2} \left( 1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \right] \\ = \left(1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \frac{\omega^2}{{\omega_{ci}}^2} \cos^2 \theta$$where$$Q = 1 + k^2 c^2/{\omega_{pe}}^2$$$$\cos \theta = \frac{k_z}{k}$$$$\mathbf{B_o} = B_{o} \mathbf{\hat{z}}$$$\omega$ is the wave frequency, $k$ is the wavenumber, $v_A$ is the Alfvรฉn velocity, $c_s$ is the sound speed, $\omega_{ci}$ is the ion gyrofrequency, and $\omega_{pe}$ is the electron plasma frequency.The approach outlined in Section 5 of [Bellan 2012][bellan2012] produces exact roots to the above dispersion equation for all three modes (fast, acoustic, and Alfvรฉn) without having to make additional approximations. 
The following dispersion relation is what the [two_fluid()][tfds] function computes.$$ \frac{\omega}{\omega_{ci}} = \sqrt{ 2 \Lambda \sqrt{-\frac{P}{3}} \cos\left( \frac{1}{3} \cos^{-1}\left( \frac{3q}{2p} \sqrt{-\frac{3}{p}} \right) - \frac{2 \pi}{3}j \right) + \frac{\Lambda A}{3} }$$where $j = 0$ represents the fast mode, $j = 1$ represents the Alfvรฉn mode, and $j = 2$ represents the acoustic mode. Additionally,$$p = \frac{3B-A^2}{3} \; , \; q = \frac{9AB-2A^3-27C}{27}$$$$A = \frac{Q + Q^2 \beta + Q \alpha + \alpha \Lambda}{Q^2} \; , \; B = \alpha \frac{1 + 2 Q \beta + \Lambda \beta}{Q^2} \; , \; C = \frac{\alpha^2 \beta}{Q^2}$$$$\alpha = \cos^2 \theta \; , \; \beta = \left( \frac{c_s}{v_A}\right)^2 \; , \; \Lambda = \left( \frac{k v_{A}}{\omega_{ci}}\right)^2$$ Contents:1. [Wave Propagating at 45 Degrees](Wave-Propagating-at-45-Degrees)2. [Wave frequencies on the k-theta plane](Wave-frequencies-on-the-k-theta-plane)3. [Reproduce Figure 1 from Bellan 2012](Reproduce-Figure-1-from-Bellan-2012) ###Code %matplotlib inline import astropy.units as u import matplotlib.pyplot as plt import numpy as np from astropy.constants.si import c from matplotlib.ticker import MultipleLocator from mpl_toolkits.axes_grid1 import make_axes_locatable from plasmapy.dispersion.analytical.two_fluid_ import two_fluid from plasmapy.formulary import speeds from plasmapy.formulary.frequencies import gyrofrequency, plasma_frequency, wc_, wp_ from plasmapy.formulary.lengths import inertial_length from plasmapy.particles import Particle plt.rcParams["figure.figsize"] = [10.5, 0.56 * 10.5] ###Output _____no_output_____ ###Markdown Wave Propagating at 45 DegreesBelow we define the required parameters to compute the wave frequencies. 
###Code # define input parameters inputs = { "k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m, "theta": 45 * u.deg, "n_i": 5 * u.cm ** -3, "B": 8.3e-9 * u.T, "T_e": 1.6e6 * u.K, "T_i": 4.0e5 * u.K, "ion": Particle("p+"), } # a few useful plasma parameters params = { "n_e": inputs["n_i"] * abs(inputs["ion"].charge_number), "cs": speeds.ion_sound_speed( inputs["T_e"], inputs["T_i"], inputs["ion"], ), "va": speeds.Alfven_speed( inputs["B"], inputs["n_i"], ion=inputs["ion"], ), "wci": gyrofrequency(inputs["B"], inputs["ion"]), } params["lpe"] = inertial_length(params["n_e"], "e-") params["wpe"] = plasma_frequency(params["n_e"], "e-") ###Output _____no_output_____ ###Markdown The computed wave frequencies ($rad/s$) are returned in a dictionary with keys representing the wave modes and the values being an Astropy [Quantity](https://docs.astropy.org/en/stable/api/astropy.units.Quantity.htmlastropy.units.Quantity). Since our inputs had a scalar $\theta$ and a 1D array of $k$'s, the computed wave frequencies will be a 1D array of size equal to the size of the $k$ array. ###Code # compute omegas = two_fluid(**inputs) (list(omegas.keys()), omegas["fast_mode"], omegas["fast_mode"].shape) ###Output _____no_output_____ ###Markdown Let's plot the results of each wave mode. 
###Code fs = 14 # default font size figwidth, figheight = plt.rcParams["figure.figsize"] figheight = 1.6 * figheight fig = plt.figure(figsize=[figwidth, figheight]) # normalize data k_prime = inputs["k"] * params["lpe"] # plot plt.plot( k_prime, np.real(omegas["fast_mode"] / params["wpe"]), "r.", ms=1, label="Fast", ) ax = plt.gca() ax.plot( k_prime, np.real(omegas["alfven_mode"] / params["wpe"]), "b.", ms=1, label="Alfvรจn", ) ax.plot( k_prime, np.real(omegas["acoustic_mode"] / params["wpe"]), "g.", ms=1, label="Acoustic", ) # adjust axes ax.set_xlabel(r"$kc / \omega_{pe}$", fontsize=fs) ax.set_ylabel(r"$Re(\omega / \omega_{pe})$", fontsize=fs) ax.set_yscale("log") ax.set_xscale("log") ax.set_ylim(1e-6, 2e-2) ax.tick_params( which="both", direction="in", width=1, labelsize=fs, right=True, length=5, ) # annotate text = ( f"$v_A/c_s = {params['va'] / params['cs']:.1f} \qquad " f"c/v_A = 10^{np.log10(c / params['va']):.0f} \qquad " f"\\theta = {inputs['theta'].value:.0f}" "^{\\circ}$" ) ax.text(0.25, 0.95, text, transform=ax.transAxes, fontsize=18) ax.legend(loc="upper left", markerscale=5, fontsize=fs) ###Output _____no_output_____ ###Markdown Wave frequencies on the k-theta planeLet us now look at the distribution of $\omega$ on a $k$-$\theta$ plane. 
###Code # define input parameters inputs = { "k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m, "theta": np.linspace(5, 85, 100) * u.deg, "n_i": 5 * u.cm ** -3, "B": 8.3e-9 * u.T, "T_e": 1.6e6 * u.K, "T_i": 4.0e5 * u.K, "ion": Particle("p+"), } # a few useful plasma parameters params = { "n_e": inputs["n_i"] * abs(inputs["ion"].charge_number), "cs": speeds.ion_sound_speed( inputs["T_e"], inputs["T_i"], inputs["ion"], ), "va": speeds.Alfven_speed( inputs["B"], inputs["n_i"], ion=inputs["ion"], ), "wci": gyrofrequency(inputs["B"], inputs["ion"]), } params["lpe"] = inertial_length(params["n_e"], "e-") params["wpe"] = plasma_frequency(params["n_e"], "e-") ###Output _____no_output_____ ###Markdown Since the $\theta$ and $k$ values are now 1-D arrays, the returned wave frequencies will be 2-D arrays with the first dimension matching the size of $k$ and the second dimension matching the size of $\theta$. ###Code # compute omegas = two_fluid(**inputs) ( omegas["fast_mode"].shape, omegas["fast_mode"].shape[0] == inputs["k"].size, omegas["fast_mode"].shape[1] == inputs["theta"].size, ) ###Output _____no_output_____ ###Markdown Let's plot (the fast mode)! 
###Code fs = 14 # default font size figwidth, figheight = plt.rcParams["figure.figsize"] figheight = 1.6 * figheight fig = plt.figure(figsize=[figwidth, figheight]) # normalize data k_prime = inputs["k"] * params["lpe"] zdata = np.transpose(np.real(omegas["fast_mode"].value)) / params["wpe"].value # plot im = plt.imshow( zdata, aspect="auto", origin="lower", extent=[ np.min(k_prime.value), np.max(k_prime.value), np.min(inputs["theta"].value), np.max(inputs["theta"].value), ], interpolation=None, cmap=plt.cm.Spectral, ) ax = plt.gca() # # adjust axes ax.set_xscale("linear") ax.set_xlabel(r"$kc/\omega_{pe}$", fontsize=fs) ax.set_ylabel(r"$\theta$ [$deg.$]", fontsize=fs) ax.tick_params( which="both", direction="in", width=2, labelsize=fs, right=True, top=True, length=10, ) # Add colorbar divider = make_axes_locatable(ax) cax = divider.append_axes("top", size="5%", pad=0.07) cbar = plt.colorbar( im, cax=cax, orientation="horizontal", ticks=None, fraction=0.05, pad=0.0, ) cbar.ax.tick_params( axis="x", direction="in", width=2, length=10, top=True, bottom=False, labelsize=fs, pad=0.0, labeltop=True, labelbottom=False, ) cbar.ax.xaxis.set_label_position("top") cbar.set_label(r"$\omega/\omega_{pe}$", fontsize=fs, labelpad=8) ###Output _____no_output_____ ###Markdown Reproduce Figure 1 from Bellan 2012[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856Figure 1 of [Bellan 2012][bellan2012] chooses parameters such that $\beta = 0.4$ and $\Lambda=0.4$. Below we define parameters to approximate Bellan's assumptions. 
###Code # define input parameters inputs = { "B": 400e-4 * u.T, "ion": Particle("He+"), "n_i": 6.358e19 * u.m ** -3, "T_e": 20 * u.eV, "T_i": 10 * u.eV, "theta": np.linspace(0, 90) * u.deg, "k": (2 * np.pi * u.rad) / (0.56547 * u.m), } # a few useful plasma parameters params = { "n_e": inputs["n_i"] * abs(inputs["ion"].charge_number), "cs": speeds.cs_(inputs["T_e"], inputs["T_i"], inputs["ion"]), "wci": wc_(inputs["B"], inputs["ion"]), "va": speeds.va_(inputs["B"], inputs["n_i"], ion=inputs["ion"]), } params["beta"] = (params["cs"] / params["va"]).value ** 2 params["wpe"] = wp_(params["n_e"], "e-") params["Lambda"] = (inputs["k"] * params["va"] / params["wci"]).value ** 2 (params["beta"], params["Lambda"]) # compute omegas = two_fluid(**inputs) # generate data for plots plt_vals = {} for mode, arr in omegas.items(): norm = (np.absolute(arr) / (inputs["k"] * params["va"])).value ** 2 plt_vals[mode] = { "x": norm * np.sin(inputs["theta"].to(u.rad).value), "y": norm * np.cos(inputs["theta"].to(u.rad).value), } fs = 14 # default font size figwidth, figheight = plt.rcParams["figure.figsize"] figheight = 1.6 * figheight fig = plt.figure(figsize=[figwidth, figheight]) # Fast mode plt.plot( plt_vals["fast_mode"]["x"], plt_vals["fast_mode"]["y"], linewidth=2, label="Fast", ) ax = plt.gca() # adjust axes ax.set_xlabel(r"$(\omega / k v_A)^2 \, \sin \theta$", fontsize=fs) ax.set_ylabel(r"$(\omega / k v_A)^2 \, \cos \theta$", fontsize=fs) ax.set_xlim(0.0, 1.5) ax.set_ylim(0.0, 2.0) for spine in ax.spines.values(): spine.set_linewidth(2) ax.minorticks_on() ax.tick_params(which="both", labelsize=fs, width=2) ax.tick_params(which="major", length=10) ax.tick_params(which="minor", length=5) ax.xaxis.set_major_locator(MultipleLocator(0.5)) ax.xaxis.set_minor_locator(MultipleLocator(0.1)) ax.yaxis.set_major_locator(MultipleLocator(0.5)) ax.yaxis.set_minor_locator(MultipleLocator(0.1)) # Alfven mode plt.plot( plt_vals["alfven_mode"]["x"], plt_vals["alfven_mode"]["y"], linewidth=2, 
label="Alfv$\`{e}$n", ) # Acoustic mode plt.plot( plt_vals["acoustic_mode"]["x"], plt_vals["acoustic_mode"]["y"], linewidth=2, label="Acoustic", ) # annotations plt.legend(fontsize=fs, loc="upper right") ###Output _____no_output_____ ###Markdown Dispersion: A Full Two Fluid Solution[tfds]: ../../api/plasmapy.dispersion.analytical.two_fluid_.two_fluid.rst[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856[stringer1963]: https://doi.org/10.1088/0368-3281/5/2/304This notebook walks through the functionalty of the [two_fluid()][tfds] function. This function computes the wave frequencies for given wavenumbers and plasma parameters based on the analytical solution presented by [Bellan 2012][bellan2012] to the [Stringer 1963][stringer1963] two fluid dispersion relation. The two fluid dispersion equaiton assumes a uniform magnetic field, a zero D.C. electric field, and low-frequency waves $\omega / k c \ll 1$ which equates to$$ \left( \cos^2 \theta - Q \frac{\omega^2}{k^2 {v_A}^2} \right) \left[ \left( \cos^2 \theta - \frac{\omega^2}{k^2 {c_s}^2} \right) - Q \frac{\omega^2}{k^2 {v_A}^2} \left( 1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \right] \\ = \left(1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \frac{\omega^2}{{\omega_{ci}}^2} \cos^2 \theta$$where$$Q = 1 + k^2 c^2/{\omega_{pe}}^2$$$$\cos \theta = \frac{k_z}{k}$$$$\mathbf{B_o} = B_{o} \mathbf{\hat{z}}$$$\omega$ is the wave frequency, $k$ is the wavenumber, $v_A$ is the Alfvรฉn velocity, $c_s$ is the sound speed, $\omega_{ci}$ is the ion gyrofrequency, and $\omega_{pe}$ is the electron plasma frequency.The approach outlined in Section 5 of [Bellan 2012][bellan2012] produces exact roots to the above dispersion equation for all three modes (fast, acoustic, and Alfvรฉn) without having to make additional approximations. 
The following dispersion relation is what the [two_fluid()][tfds] function computes.$$ \frac{\omega}{\omega_{ci}} = \sqrt{ 2 \Lambda \sqrt{-\frac{P}{3}} \cos\left( \frac{1}{3} \cos^{-1}\left( \frac{3q}{2p} \sqrt{-\frac{3}{p}} \right) - \frac{2 \pi}{3}j \right) + \frac{\Lambda A}{3} }$$where $j = 0$ represents the fast mode, $j = 1$ represents the Alfvรฉn mode, and $j = 2$ represents the acoustic mode. Additionally,$$p = \frac{3B-A^2}{3} \; , \; q = \frac{9AB-2A^3-27C}{27}$$$$A = \frac{Q + Q^2 \beta + Q \alpha + \alpha \Lambda}{Q^2} \; , \; B = \alpha \frac{1 + 2 Q \beta + \Lambda \beta}{Q^2} \; , \; C = \frac{\alpha^2 \beta}{Q^2}$$$$\alpha = \cos^2 \theta \; , \; \beta = \left( \frac{c_s}{v_A}\right)^2 \; , \; \Lambda = \left( \frac{k v_{A}}{\omega_{ci}}\right)^2$$ Contents:1. [Wave Propagating at 45 Degrees](Wave-Propagating-at-45-Degrees)2. [Wave frequencies on the k-theta plane](Wave-frequencies-on-the-k-theta-plane)3. [Reproduce Figure 1 from Bellan 2012](Reproduce-Figure-1-from-Bellan-2012) ###Code %matplotlib inline import astropy.units as u import matplotlib.pyplot as plt import numpy as np from astropy.constants.si import c from matplotlib.ticker import MultipleLocator from mpl_toolkits.axes_grid1 import make_axes_locatable from plasmapy.dispersion.analytical.two_fluid_ import two_fluid from plasmapy.formulary import speeds from plasmapy.formulary.frequencies import gyrofrequency, plasma_frequency, wc_, wp_ from plasmapy.formulary.lengths import inertial_length from plasmapy.particles import Particle plt.rcParams["figure.figsize"] = [10.5, 0.56 * 10.5] ###Output _____no_output_____ ###Markdown Wave Propagating at 45 DegreesBelow we define the required parameters to compute the wave frequencies. 
###Code # define input parameters inputs = { "k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m, "theta": 45 * u.deg, "n_i": 5 * u.cm ** -3, "B": 8.3e-9 * u.T, "T_e": 1.6e6 * u.K, "T_i": 4.0e5 * u.K, "ion": Particle("p+"), } # a few useful plasma parameters params = { "n_e": inputs["n_i"] * abs(inputs["ion"].charge_number), "cs": speeds.ion_sound_speed( inputs["T_e"], inputs["T_i"], inputs["ion"], ), "va": speeds.Alfven_speed( inputs["B"], inputs["n_i"], ion=inputs["ion"], ), "wci": gyrofrequency(inputs["B"], inputs["ion"]), } params["lpe"] = inertial_length(params["n_e"], "e-") params["wpe"] = plasma_frequency(params["n_e"], "e-") ###Output _____no_output_____ ###Markdown The computed wave frequencies ($rad/s$) are returned in a dictionary with keys representing the wave modes and the values being an Astropy [Quantity](https://docs.astropy.org/en/stable/api/astropy.units.Quantity.htmlastropy.units.Quantity). Since our inputs had a scalar $\theta$ and a 1D array of $k$'s, the computed wave frequencies will be a 1D array of size equal to the size of the $k$ array. ###Code # compute omegas = two_fluid(**inputs) (list(omegas.keys()), omegas["fast_mode"], omegas["fast_mode"].shape) ###Output _____no_output_____ ###Markdown Let's plot the results of each wave mode. 
###Code fs = 14 # default font size figwidth, figheight = plt.rcParams["figure.figsize"] figheight = 1.6 * figheight fig = plt.figure(figsize=[figwidth, figheight]) # normalize data k_prime = inputs["k"] * params["lpe"] # plot plt.plot( k_prime, np.real(omegas["fast_mode"] / params["wpe"]), "r.", ms=1, label="Fast", ) ax = plt.gca() ax.plot( k_prime, np.real(omegas["alfven_mode"] / params["wpe"]), "b.", ms=1, label="Alfvรจn", ) ax.plot( k_prime, np.real(omegas["acoustic_mode"] / params["wpe"]), "g.", ms=1, label="Acoustic", ) # adjust axes ax.set_xlabel(r"$kc / \omega_{pe}$", fontsize=fs) ax.set_ylabel(r"$Re(\omega / \omega_{pe})$", fontsize=fs) ax.set_yscale("log") ax.set_xscale("log") ax.set_ylim(1e-6, 2e-2) ax.tick_params( which="both", direction="in", width=1, labelsize=fs, right=True, length=5, ) # annotate text = ( f"$v_A/c_s = {params['va'] / params['cs']:.1f} \qquad " f"c/v_A = 10^{np.log10(c / params['va']):.0f} \qquad " f"\\theta = {inputs['theta'].value:.0f}" "^{\\circ}$" ) ax.text(0.25, 0.95, text, transform=ax.transAxes, fontsize=18) ax.legend(loc="upper left", markerscale=5, fontsize=fs) ###Output _____no_output_____ ###Markdown Wave frequencies on the k-theta planeLet us now look at the distribution of $\omega$ on a $k$-$\theta$ plane. 
###Code # define input parameters inputs = { "k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m, "theta": np.linspace(5, 85, 100) * u.deg, "n_i": 5 * u.cm ** -3, "B": 8.3e-9 * u.T, "T_e": 1.6e6 * u.K, "T_i": 4.0e5 * u.K, "ion": Particle("p+"), } # a few useful plasma parameters params = { "n_e": inputs["n_i"] * abs(inputs["ion"].charge_number), "cs": speeds.ion_sound_speed( inputs["T_e"], inputs["T_i"], inputs["ion"], ), "va": speeds.Alfven_speed( inputs["B"], inputs["n_i"], ion=inputs["ion"], ), "wci": gyrofrequency(inputs["B"], inputs["ion"]), } params["lpe"] = inertial_length(params["n_e"], "e-") params["wpe"] = plasma_frequency(params["n_e"], "e-") ###Output _____no_output_____ ###Markdown Since the $\theta$ and $k$ values are now 1-D arrays, the returned wave frequencies will be 2-D arrays with the first dimension matching the size of $k$ and the second dimension matching the size of $\theta$. ###Code # compute omegas = two_fluid(**inputs) ( omegas["fast_mode"].shape, omegas["fast_mode"].shape[0] == inputs["k"].size, omegas["fast_mode"].shape[1] == inputs["theta"].size, ) ###Output _____no_output_____ ###Markdown Let's plot (the fast mode)! 
###Code fs = 14 # default font size figwidth, figheight = plt.rcParams["figure.figsize"] figheight = 1.6 * figheight fig = plt.figure(figsize=[figwidth, figheight]) # normalize data k_prime = inputs["k"] * params["lpe"] zdata = np.transpose(np.real(omegas["fast_mode"].value)) / params["wpe"].value # plot im = plt.imshow( zdata, aspect="auto", origin="lower", extent=[ np.min(k_prime.value), np.max(k_prime.value), np.min(inputs["theta"].value), np.max(inputs["theta"].value), ], interpolation=None, cmap=plt.cm.Spectral, ) ax = plt.gca() # # adjust axes ax.set_xscale("linear") ax.set_xlabel(r"$kc/\omega_{pe}$", fontsize=fs) ax.set_ylabel(r"$\theta$ [$deg.$]", fontsize=fs) ax.tick_params( which="both", direction="in", width=2, labelsize=fs, right=True, top=True, length=10, ) # Add colorbar divider = make_axes_locatable(ax) cax = divider.append_axes("top", size="5%", pad=0.07) cbar = plt.colorbar( im, cax=cax, orientation="horizontal", ticks=None, fraction=0.05, pad=0.0, ) cbar.ax.tick_params( axis="x", direction="in", width=2, length=10, top=True, bottom=False, labelsize=fs, pad=0.0, labeltop=True, labelbottom=False, ) cbar.ax.xaxis.set_label_position("top") cbar.set_label(r"$\omega/\omega_{pe}$", fontsize=fs, labelpad=8) ###Output _____no_output_____ ###Markdown Reproduce Figure 1 from Bellan 2012[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856Figure 1 of [Bellan 2012][bellan2012] chooses parameters such that $\beta = 0.4$ and $\Lambda=0.4$. Below we define parameters to approximate Bellan's assumptions. 
###Code # define input parameters inputs = { "B": 400e-4 * u.T, "ion": Particle("He+"), "n_i": 6.358e19 * u.m ** -3, "T_e": 20 * u.eV, "T_i": 10 * u.eV, "theta": np.linspace(0, 90) * u.deg, "k": (2 * np.pi * u.rad) / (0.56547 * u.m), } # a few useful plasma parameters params = { "n_e": inputs["n_i"] * abs(inputs["ion"].charge_number), "cs": speeds.cs_(inputs["T_e"], inputs["T_i"], inputs["ion"]), "wci": wc_(inputs["B"], inputs["ion"]), "va": speeds.va_(inputs["B"], inputs["n_i"], ion=inputs["ion"]), } params["beta"] = (params["cs"] / params["va"]).value ** 2 params["wpe"] = wp_(params["n_e"], "e-") params["Lambda"] = (inputs["k"] * params["va"] / params["wci"]).value ** 2 (params["beta"], params["Lambda"]) # compute omegas = two_fluid(**inputs) # generate data for plots plt_vals = {} for mode, arr in omegas.items(): norm = (np.absolute(arr) / (inputs["k"] * params["va"])).value ** 2 plt_vals[mode] = { "x": norm * np.sin(inputs["theta"].to(u.rad).value), "y": norm * np.cos(inputs["theta"].to(u.rad).value), } fs = 14 # default font size figwidth, figheight = plt.rcParams["figure.figsize"] figheight = 1.6 * figheight fig = plt.figure(figsize=[figwidth, figheight]) # Fast mode plt.plot( plt_vals["fast_mode"]["x"], plt_vals["fast_mode"]["y"], linewidth=2, label="Fast", ) ax = plt.gca() # adjust axes ax.set_xlabel(r"$(\omega / k v_A)^2 \, \sin \theta$", fontsize=fs) ax.set_ylabel(r"$(\omega / k v_A)^2 \, \cos \theta$", fontsize=fs) ax.set_xlim(0.0, 1.5) ax.set_ylim(0.0, 2.0) for spine in ax.spines.values(): spine.set_linewidth(2) ax.minorticks_on() ax.tick_params(which="both", labelsize=fs, width=2) ax.tick_params(which="major", length=10) ax.tick_params(which="minor", length=5) ax.xaxis.set_major_locator(MultipleLocator(0.5)) ax.xaxis.set_minor_locator(MultipleLocator(0.1)) ax.yaxis.set_major_locator(MultipleLocator(0.5)) ax.yaxis.set_minor_locator(MultipleLocator(0.1)) # Alfven mode plt.plot( plt_vals["alfven_mode"]["x"], plt_vals["alfven_mode"]["y"], linewidth=2, 
label="Alfv$\`{e}$n", ) # Acoustic mode plt.plot( plt_vals["acoustic_mode"]["x"], plt_vals["acoustic_mode"]["y"], linewidth=2, label="Acoustic", ) # annotations plt.legend(fontsize=fs, loc="upper right") ###Output _____no_output_____ ###Markdown Dispersion: A Full Two Fluid Solution[tfds]: ../../api/plasmapy.dispersion.two_fluid_dispersion.two_fluid_dispersion_solution.rst[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856[stringer1963]: https://doi.org/10.1088/0368-3281/5/2/304This notebook walks through the functionalty of the [two_fluid_dispersion_solution()][tfds] function. This function computes the wave frequencies for given wavenumbers and plasma parameters based on the analytical solution presented by [Bellan 2012][bellan2012] to the [Stringer 1963][stringer1963] two fluid dispersion relation. The two fluid dispersion equaiton assumes a uniform magnetic field, a zero D.C. electric field, and low-frequency waves $\omega / k c \ll 1$ which equates to$$ \left( \cos^2 \theta - Q \frac{\omega^2}{k^2 {v_A}^2} \right) \left[ \left( \cos^2 \theta - \frac{\omega^2}{k^2 {c_s}^2} \right) - Q \frac{\omega^2}{k^2 {v_A}^2} \left( 1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \right] \\ = \left(1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \frac{\omega^2}{{\omega_{ci}}^2} \cos^2 \theta$$where$$Q = 1 + k^2 c^2/{\omega_{pe}}^2$$$$\cos \theta = \frac{k_z}{k}$$$$\mathbf{B_o} = B_{o} \mathbf{\hat{z}}$$$\omega$ is the wave frequency, $k$ is the wavenumber, $v_A$ is the Alfvรฉn velocity, $c_s$ is the sound speed, $\omega_{ci}$ is the ion gyrofrequency, and $\omega_{pe}$ is the electron plasma frequency.The approach outlined in Section 5 of [Bellan 2012][bellan2012] produces exact roots to the above dispersion equation for all three modes (fast, acoustic, and Alfvรฉn) without having to make additional approximations. 
The following dispersion relation is what the [two_fluid_dispersion_solution()][tfds] function computes.$$ \frac{\omega}{\omega_{ci}} = \sqrt{ 2 \Lambda \sqrt{-\frac{P}{3}} \cos\left( \frac{1}{3} \cos^{-1}\left( \frac{3q}{2p} \sqrt{-\frac{3}{p}} \right) - \frac{2 \pi}{3}j \right) + \frac{\Lambda A}{3} }$$where $j = 0$ represents the fast mode, $j = 1$ represents the Alfvรฉn mode, and $j = 2$ represents the acoustic mode. Additionally,$$p = \frac{3B-A^2}{3} \; , \; q = \frac{9AB-2A^3-27C}{27}$$$$A = \frac{Q + Q^2 \beta + Q \alpha + \alpha \Lambda}{Q^2} \; , \; B = \alpha \frac{1 + 2 Q \beta + \Lambda \beta}{Q^2} \; , \; C = \frac{\alpha^2 \beta}{Q^2}$$$$\alpha = \cos^2 \theta \; , \; \beta = \left( \frac{c_s}{v_A}\right)^2 \; , \; \Lambda = \left( \frac{k v_{A}}{\omega_{ci}}\right)^2$$ Contents:1. [Wave Propagating at 45 Degrees](Wave-Propagating-at-45-Degrees)2. [Wave frequencies on the k-theta plane](Wave-frequencies-on-the-k-theta-plane)3. [Reproduce Figure 1 from Bellan 2012](Reproduce-Figure-1-from-Bellan-2012) ###Code %matplotlib inline import astropy.units as u import matplotlib.pyplot as plt import numpy as np from astropy.constants.si import c from matplotlib import colors from matplotlib.ticker import MultipleLocator from mpl_toolkits.axes_grid1 import make_axes_locatable from plasmapy.dispersion.two_fluid_dispersion import two_fluid_dispersion_solution from plasmapy.formulary import parameters as pfp from plasmapy.particles import Particle plt.rcParams["figure.figsize"] = [10.5, 0.56 * 10.5] ###Output _____no_output_____ ###Markdown Wave Propagating at 45 DegreesBelow we define the required parameters to compute the wave frequencies. 
###Code # define input parameters inputs = { "k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m, "theta": 45 * u.deg, "n_i": 5 * u.cm ** -3, "B": 8.3e-9 * u.T, "T_e": 1.6e6 * u.K, "T_i": 4.0e5 * u.K, "ion": Particle("p+"), } # a few useful plasma parameters params = { "n_e": inputs["n_i"] * abs(inputs["ion"].integer_charge), "cs": pfp.ion_sound_speed(inputs["T_e"], inputs["T_i"], inputs["ion"],), "va": pfp.Alfven_speed(inputs["B"], inputs["n_i"], ion=inputs["ion"],), "wci": pfp.gyrofrequency(inputs["B"], inputs["ion"]), } params["lpe"] = pfp.inertial_length(params["n_e"], "e-") params["wpe"] = pfp.plasma_frequency(params["n_e"], "e-") ###Output _____no_output_____ ###Markdown The computed wave frequencies ($rad/s$) are returned in a dictionary with keys representing the wave modes and the values being an Astropy [Quantity](https://docs.astropy.org/en/stable/api/astropy.units.Quantity.htmlastropy.units.Quantity). Since our inputs had a scalar $\theta$ and a 1D array of $k$'s, the computed wave frequencies will be a 1D array of size equal to the size of the $k$ array. ###Code # compute omegas = two_fluid_dispersion_solution(**inputs) (list(omegas.keys()), omegas["fast_mode"], omegas["fast_mode"].shape) ###Output _____no_output_____ ###Markdown Let's plot the results of each wave mode. 
###Code fs = 14 # default font size figwidth, figheight = plt.rcParams["figure.figsize"] figheight = 1.6 * figheight fig = plt.figure(figsize=[figwidth, figheight]) # normalize data k_prime = inputs["k"] * params["lpe"] # plot plt.plot( k_prime, np.real(omegas["fast_mode"] / params["wpe"]), "r.", ms=1, label="Fast", ) ax = plt.gca() ax.plot( k_prime, np.real(omegas["alfven_mode"] / params["wpe"]), "b.", ms=1, label="Alfvรจn", ) ax.plot( k_prime, np.real(omegas["acoustic_mode"] / params["wpe"]), "g.", ms=1, label="Acoustic", ) # adjust axes ax.set_xlabel(r"$kc / \omega_{pe}$", fontsize=fs) ax.set_ylabel(r"$Re(\omega / \omega_{pe})$", fontsize=fs) ax.set_yscale("log") ax.set_xscale("log") ax.set_ylim(1e-6, 2e-2) ax.tick_params( which="both", direction="in", width=1, labelsize=fs, right=True, length=5, ) # annotate text = ( f"$v_A/c_s = {params['va'] / params['cs']:.1f} \qquad " f"c/v_A = 10^{np.log10(c / params['va']):.0f} \qquad " f"\\theta = {inputs['theta'].value:.0f}" "^{\\circ}$" ) ax.text(0.25, 0.95, text, transform=ax.transAxes, fontsize=18) ax.legend(loc="upper left", markerscale=5, fontsize=fs) ###Output _____no_output_____ ###Markdown Wave frequencies on the k-theta planeLet us now look at the distribution of $\omega$ on a $k$-$\theta$ plane. 
###Code # define input parameters inputs = { "k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m, "theta": np.linspace(5, 85, 100) * u.deg, "n_i": 5 * u.cm ** -3, "B": 8.3e-9 * u.T, "T_e": 1.6e6 * u.K, "T_i": 4.0e5 * u.K, "ion": Particle("p+"), } # a few useful plasma parameters params = { "n_e": inputs["n_i"] * abs(inputs["ion"].integer_charge), "cs": pfp.ion_sound_speed(inputs["T_e"], inputs["T_i"], inputs["ion"],), "va": pfp.Alfven_speed(inputs["B"], inputs["n_i"], ion=inputs["ion"],), "wci": pfp.gyrofrequency(inputs["B"], inputs["ion"]), } params["lpe"] = pfp.inertial_length(params["n_e"], "e-") params["wpe"] = pfp.plasma_frequency(params["n_e"], "e-") ###Output _____no_output_____ ###Markdown Since the $\theta$ and $k$ values are now 1-D arrays, the returned wave frequencies will be 2-D arrays with the first dimension matching the size of $k$ and the second dimension matching the size of $\theta$. ###Code # compute omegas = two_fluid_dispersion_solution(**inputs) ( omegas["fast_mode"].shape, omegas["fast_mode"].shape[0] == inputs["k"].size, omegas["fast_mode"].shape[1] == inputs["theta"].size, ) ###Output _____no_output_____ ###Markdown Let's plot (the fast mode)! 
###Code fs = 14 # default font size figwidth, figheight = plt.rcParams["figure.figsize"] figheight = 1.6 * figheight fig = plt.figure(figsize=[figwidth, figheight]) # normalize data k_prime = inputs["k"] * params["lpe"] zdata = np.transpose(np.real(omegas["fast_mode"].value)) / params["wpe"].value # plot im = plt.imshow( zdata, aspect="auto", origin="lower", extent=[ np.min(k_prime.value), np.max(k_prime.value), np.min(inputs["theta"].value), np.max(inputs["theta"].value), ], interpolation=None, cmap=plt.cm.Spectral, ) ax = plt.gca() # # adjust axes ax.set_xscale("linear") ax.set_xlabel(r"$kc/\omega_{pe}$", fontsize=fs) ax.set_ylabel(r"$\theta$ [$deg.$]", fontsize=fs) ax.tick_params( which="both", direction="in", width=2, labelsize=fs, right=True, top=True, length=10, ) # Add colorbar divider = make_axes_locatable(ax) cax = divider.append_axes("top", size="5%", pad=0.07) cbar = plt.colorbar( im, cax=cax, orientation="horizontal", ticks=None, fraction=0.05, pad=0.0, ) cbar.ax.tick_params( axis="x", direction="in", width=2, length=10, top=True, bottom=False, labelsize=fs, pad=0.0, labeltop=True, labelbottom=False, ) cbar.ax.xaxis.set_label_position("top") cbar.set_label(r"$\omega/\omega_{pe}$", fontsize=fs, labelpad=8) ###Output _____no_output_____ ###Markdown Reproduce Figure 1 from Bellan 2012[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856Figure 1 of [Bellan 2012][bellan2012] chooses parameters such that $\beta = 0.4$ and $\Lambda=0.4$. Below we define parameters to approximate Bellan's assumptions. 
###Code # define input parameters inputs = { "B": 400e-4 * u.T, "ion": Particle("He+"), "n_i": 6.358e19 * u.m ** -3, "T_e": 20 * u.eV, "T_i": 10 * u.eV, "theta": np.linspace(0, 90) * u.deg, "k": (2 * np.pi * u.rad) / (0.56547 * u.m), } # a few useful plasma parameters params = { "n_e": inputs["n_i"] * abs(inputs["ion"].integer_charge), "cs": pfp.cs_(inputs["T_e"], inputs["T_i"], inputs["ion"]), "wci": pfp.wc_(inputs["B"], inputs["ion"]), "va": pfp.va_(inputs["B"], inputs["n_i"], ion=inputs["ion"]), } params["beta"] = (params["cs"] / params["va"]).value ** 2 params["wpe"] = pfp.wp_(params["n_e"], "e-") params["Lambda"] = (inputs["k"] * params["va"] / params["wci"]).value ** 2 (params["beta"], params["Lambda"]) # compute omegas = two_fluid_dispersion_solution(**inputs) # generate data for plots plt_vals = {} for mode, arr in omegas.items(): norm = (np.absolute(arr) / (inputs["k"] * params["va"])).value ** 2 plt_vals[mode] = { "x": norm * np.sin(inputs["theta"].to(u.rad).value), "y": norm * np.cos(inputs["theta"].to(u.rad).value), } fs = 14 # default font size figwidth, figheight = plt.rcParams["figure.figsize"] figheight = 1.6 * figheight fig = plt.figure(figsize=[figwidth, figheight]) # Fast mode plt.plot( plt_vals["fast_mode"]["x"], plt_vals["fast_mode"]["y"], linewidth=2, label="Fast", ) ax = plt.gca() # adjust axes ax.set_xlabel(r"$(\omega / k v_A)^2 \, \sin \theta$", fontsize=fs) ax.set_ylabel(r"$(\omega / k v_A)^2 \, \cos \theta$", fontsize=fs) ax.set_xlim(0.0, 1.5) ax.set_ylim(0.0, 2.0) for spine in ax.spines.values(): spine.set_linewidth(2) ax.minorticks_on() ax.tick_params(which="both", labelsize=fs, width=2) ax.tick_params(which="major", length=10) ax.tick_params(which="minor", length=5) ax.xaxis.set_major_locator(MultipleLocator(0.5)) ax.xaxis.set_minor_locator(MultipleLocator(0.1)) ax.yaxis.set_major_locator(MultipleLocator(0.5)) ax.yaxis.set_minor_locator(MultipleLocator(0.1)) # Alfven mode plt.plot( plt_vals["alfven_mode"]["x"], 
plt_vals["alfven_mode"]["y"], linewidth=2, label="Alfv$\`{e}$n", ) # Acoustic mode plt.plot( plt_vals["acoustic_mode"]["x"], plt_vals["acoustic_mode"]["y"], linewidth=2, label="Acoustic", ) # annotations plt.legend(fontsize=fs, loc="upper right") ###Output _____no_output_____
tests/test_9/dog_app.ipynb
###Markdown Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. 
If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). ![Sample Dog Output](images/sample_dog_output.png)In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. 
* Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`. ###Code import numpy as np from glob import glob # load filenames for human and dog images human_files = np.array(glob("/data/lfw/*/*")) dog_files = np.array(glob("/data/dog_images/*/*/*")) # print number of images in each dataset print('There are %d total human images.' % len(human_files)) print('There are %d total dog images.' % len(dog_files)) ###Output There are 13233 total human images. There are 8351 total dog images. ###Markdown Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image. 
###Code import cv2 import matplotlib.pyplot as plt %matplotlib inline # extract pre-trained face detector face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml') # load color (BGR) image img = cv2.imread(human_files[0]) # convert BGR image to grayscale gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # find faces in image faces = face_cascade.detectMultiScale(gray) # print number of faces detected in the image print('Number of faces detected:', len(faces)) # get bounding box for each detected face for (x,y,w,h) in faces: # add bounding box to color image cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) # convert BGR image to RGB for plotting cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # display the image, along with bounding box plt.imshow(cv_rgb) plt.show() ###Output Number of faces detected: 1 ###Markdown Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below. 
###Code # returns "True" if face is detected in image stored at img_path def face_detector(img_path): img = cv2.imread(img_path) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray) return len(faces) > 0 ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell) ###Code from tqdm import tqdm human_files_short = human_files[:100] dog_files_short = dog_files[:100] #-#-# Do NOT modify the code above this line. #-#-# ## TODO: Test the performance of the face_detector algorithm ## on the images in human_files_short and dog_files_short. 
human_faces = [] dog_faces = [] for human_image in human_files_short: human_faces.append(face_detector(human_image)) for dog_image in dog_files_short: dog_faces.append(face_detector(dog_image)) human_detected_faces = sum(human_faces) dog_detected_faces = sum(dog_faces) # precision # tp / tp + fp human_detected_faces / human_detected_faces + 0 correctly_identified = 100 - dog_detected_faces correctly_identified / (correctly_identified + dog_detected_faces) ###Output _____no_output_____ ###Markdown We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Test performance of anotherface detection algorithm. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). 
###Code import torch import torchvision.models as models # define VGG16 model VGG16 = models.vgg16(pretrained=True) # check if CUDA is available use_cuda = torch.cuda.is_available() # move model to GPU if CUDA is available if use_cuda: VGG16 = VGG16.cuda() ###Output Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.torch/models/vgg16-397923af.pth 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 553433881/553433881 [00:05<00:00, 102387072.20it/s] ###Markdown Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html). ###Code from PIL import Image import torchvision.transforms as transforms def VGG16_predict(img_path): ''' Use pre-trained VGG-16 model to obtain index corresponding to predicted ImageNet class for image at specified path Args: img_path: path to an image Returns: Index corresponding to VGG-16 model's prediction ''' ## TODO: Complete the function. 
## Load and pre-process an image from the given img_path ## Return the *index* of the predicted class for that image transform = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) raw_img = Image.open(img_path) cleaned_img = transform(raw_img) float_img = cleaned_img.unsqueeze(0).cuda() predicted_idx = VGG16(float_img) predicted_idx = torch.argmax(predicted_idx) return predicted_idx # predicted class index ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not). ###Code ### returns "True" if a dog is detected in the image stored at img_path def dog_detector(img_path): ## TODO: Complete the function. idx = VGG16_predict(img_path) #print(idx) if idx >= 151 and idx <= 268: return True return False # true/false ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__ ###Code ### TODO: Test the performance of the dog_detector function ### on the images in human_files_short and dog_files_short. 
human_faces = [] dog_faces = [] for human_image in human_files_short: human_faces.append(dog_detector(human_image)) for dog_image in dog_files_short: dog_faces.append(dog_detector(dog_image)) human_detected_faces = sum(human_faces) dog_detected_faces = sum(dog_faces) print('Percentage of Human Faces',human_detected_faces / len(human_files_short)) print('Percentage of Dog Faces',dog_detected_faces / len(dog_files_short)) ###Output Percentage of Human Faces 0.0 Percentage of Dog Faces 1.0 ###Markdown We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`. ###Code ### (Optional) ### TODO: Report the performance of another pre-trained network. ### Feel free to use as many code cells as needed. ###Output _____no_output_____ ###Markdown --- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. 
Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imabalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)! 
###Code import os from torchvision import datasets import torch.utils.data as data from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True ### TODO: Write data loaders for training, validation, and test sets ## Specify appropriate transforms, and batch_sizes BATCH_SIZE = 20 transform = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) train_dir = os.path.join('dogImages','train') val_dir = os.path.join('dogImages','valid') test_dir = os.path.join('dogImages','test') train_dataset = datasets.ImageFolder(train_dir, transform=transform) val_dataset = datasets.ImageFolder(val_dir, transform=transform) test_dataset = datasets.ImageFolder(test_dir, transform=transform) train_loader = data.DataLoader(dataset=train_dataset, shuffle=True, batch_size=BATCH_SIZE) val_loader = data.DataLoader(dataset=val_dataset, shuffle=True, batch_size=BATCH_SIZE) test_loader = data.DataLoader(dataset=test_dataset, shuffle=True, batch_size=BATCH_SIZE) loaders_scratch = {'train': train_loader, 'valid':val_loader, 'test':test_loader} ###Output _____no_output_____ ###Markdown **Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below. 
###Code import torch.nn as nn import torch.nn.functional as F # define the CNN architecture class Net(nn.Module): ### TODO: choose an architecture, and complete the class def __init__(self): super(Net, self).__init__() ## Define layers of a CNN self.conv1 = nn.Conv2d(3, 32, 5) # outputs (32,220,220) self.pool = nn.MaxPool2d(2,2) # first layer output after pool # (32,110,110) self.conv2 = nn.Conv2d(32,64,5) #outputs (64,106,106) # after max pool # (64,53,53) self.conv3 = nn.Conv2d(64,128,5) # after max pool # (128,24,24) self.conv4 = nn.Conv2d(128,256,5) #after max pool # (256,10,10) self.conv5 = nn.Conv2d(256,512,5) #after max pool # (512,3,3) self.dropout = nn.Dropout(0.4) self.fc1 = nn.Linear(512*3*3,2304) # 133 dog breeds # as we have 133 folders self.fc2 = nn.Linear(2304,133) def forward(self, x): ## Define forward behavior x = self.pool(F.relu(self.conv1(x))) #x = self.dropout(x) x = self.pool(F.relu(self.conv2(x))) #x = self.dropout(x) x = self.pool(F.relu(self.conv3(x))) #x = self.dropout(x) x = self.pool(F.relu(self.conv4(x))) x = self.pool(F.relu(self.conv5(x))) x = self.dropout(x) x = x.view(-1, 512*3*3) x = F.relu(self.fc1(x)) x = self.dropout(x) x = self.fc2(x) return x #-#-# You so NOT have to modify the code below this line. #-#-# # instantiate the CNN model_scratch = Net() # move tensors to GPU if CUDA is available if use_cuda: model_scratch.cuda() ###Output _____no_output_____ ###Markdown __Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below. 
###Code import torch.optim as optim ### TODO: select loss function criterion_scratch = nn.CrossEntropyLoss().cuda() if torch.cuda.is_available() else nn.CrossEntropyLoss() ### TODO: select optimizer optimizer_scratch = optim.SGD(model_scratch.parameters(), lr = 0.06,weight_decay=1e-3) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`. ###Code def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path): """returns trained model""" # initialize tracker for minimum validation loss valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): # initialize variables to monitor training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for batch_idx, (data, target) in enumerate(loaders['train']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() ## find the loss and update the model parameters accordingly ## record the average training loss, using something like ## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) model.zero_grad() #print(target) output = model(data) #print(output) loss = criterion_scratch(output,target) train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss)) # Backward pass. loss.backward() # Update the parameters in the optimizer. 
optimizer.step() if batch_idx % 100 == 0: print('Epoch %d, Batch %d loss: %.6f' % (epoch, batch_idx + 1, train_loss)) ###################### # validate the model # ###################### model.eval() for batch_idx, (data, target) in enumerate(loaders['valid']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() output = model(data) loss = criterion_scratch(output,target) ## update the average validation loss valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss)) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss )) ## TODO: save the model if validation loss has decreased if valid_loss < valid_loss_min: torch.save(model.state_dict(), save_path) valid_loss_min = valid_loss # return trained model return model # train the model model_scratch = train(10, loaders_scratch, model_scratch, optimizer_scratch, criterion_scratch, use_cuda, 'model_scratch.pt') # load the model that got the best validation accuracy #model_scratch.load_state_dict(torch.load('model_scratch.pt')) ###Output Epoch 1, Batch 1 loss: 4.889373 Epoch 1, Batch 101 loss: 4.887956 Epoch 1, Batch 201 loss: 4.885204 Epoch 1, Batch 301 loss: 4.869027 Epoch: 1 Training Loss: 4.867319 Validation Loss: 4.861140 Epoch 2, Batch 1 loss: 4.836076 Epoch 2, Batch 101 loss: 4.868313 Epoch 2, Batch 201 loss: 4.864567 Epoch 2, Batch 301 loss: 4.865156 Epoch: 2 Training Loss: 4.866021 Validation Loss: 4.858485 Epoch 3, Batch 1 loss: 4.860460 Epoch 3, Batch 101 loss: 4.853211 Epoch 3, Batch 201 loss: 4.842324 Epoch 3, Batch 301 loss: 4.832800 Epoch: 3 Training Loss: 4.834305 Validation Loss: 4.878834 Epoch 4, Batch 1 loss: 4.880487 Epoch 4, Batch 101 loss: 4.876013 Epoch 4, Batch 201 loss: 4.871992 Epoch 4, Batch 301 loss: 4.846231 Epoch: 4 Training Loss: 4.841362 Validation Loss: 4.808552 Epoch 5, Batch 1 loss: 4.899741 Epoch 5, Batch 101 loss: 4.825074 Epoch 5, Batch 201 loss: 4.785785 Epoch 
5, Batch 301 loss: 4.774435 Epoch: 5 Training Loss: 4.770420 Validation Loss: 4.633053 Epoch 6, Batch 1 loss: 4.295796 Epoch 6, Batch 101 loss: 4.830859 Epoch 6, Batch 201 loss: 4.798131 Epoch 6, Batch 301 loss: 4.781902 Epoch: 6 Training Loss: 4.779203 Validation Loss: 4.819456 Epoch 7, Batch 1 loss: 4.832332 Epoch 7, Batch 101 loss: 4.686429 Epoch 7, Batch 201 loss: 4.726315 Epoch 7, Batch 301 loss: 4.706894 Epoch: 7 Training Loss: 4.718076 Validation Loss: 4.736145 Epoch 8, Batch 1 loss: 4.572536 Epoch 8, Batch 101 loss: 4.659453 Epoch 8, Batch 201 loss: 4.636588 Epoch 8, Batch 301 loss: 4.621697 Epoch: 8 Training Loss: 4.620641 Validation Loss: 4.567732 Epoch 9, Batch 1 loss: 4.688214 Epoch 9, Batch 101 loss: 4.574794 Epoch 9, Batch 201 loss: 4.546075 Epoch 9, Batch 301 loss: 4.555486 Epoch: 9 Training Loss: 4.554761 Validation Loss: 4.736269 Epoch 10, Batch 1 loss: 4.636405 Epoch 10, Batch 101 loss: 4.484521 Epoch 10, Batch 201 loss: 4.473972 Epoch 10, Batch 301 loss: 4.457046 Epoch: 10 Training Loss: 4.453424 Validation Loss: 4.554268 ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%. ###Code def test(loaders, model, criterion, use_cuda): # monitor test loss and accuracy test_loss = 0. correct = 0. total = 0. 
model.eval() for batch_idx, (data, target) in enumerate(loaders['test']): # move to GPU if use_cuda: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the loss loss = criterion(output, target) # update average test loss test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss)) # convert output probabilities to predicted class pred = output.data.max(1, keepdim=True)[1] # compare predictions to true label correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy()) total += data.size(0) print('Test Loss: {:.6f}\n'.format(test_loss)) print('\nTest Accuracy: %2d%% (%2d/%2d)' % ( 100. * correct / total, correct, total)) # call test function test(loaders_scratch, model_scratch, criterion_scratch, use_cuda) ###Output _____no_output_____ ###Markdown --- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch. 
###Code ## TODO: Specify data loaders BATCH_SIZE = 32 transform = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) train_dir = os.path.join('dogImages','train') val_dir = os.path.join('dogImages','valid') test_dir = os.path.join('dogImages','test') train_dataset = datasets.ImageFolder(train_dir, transform=transform) val_dataset = datasets.ImageFolder(val_dir, transform=transform) test_dataset = datasets.ImageFolder(test_dir, transform=transform) train_loader = data.DataLoader(dataset=train_dataset, shuffle=True, batch_size=BATCH_SIZE) val_loader = data.DataLoader(dataset=val_dataset, shuffle=True, batch_size=BATCH_SIZE) test_loader = data.DataLoader(dataset=test_dataset, shuffle=True, batch_size=BATCH_SIZE) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`. ###Code import torchvision.models as models import torch.nn as nn ## TODO: Specify model architecture class MyNet(nn.Module): def __init__(self): super(MyNet, self).__init__() VGG16 = models.vgg16(pretrained=True) for param in VGG16.parameters(): VGG16.requires_grad_(False) modules = list(VGG16.children())[:-1] self.VGG16 = nn.Sequential(*modules) self.fc = nn.Linear(VGG16.fc.in_features, 133) def forward(self, images): features = self.VGG16(images) features = features.view(features.size(0), -1) features = self.fc(features) return features model_transfer = MyNet() if use_cuda: model_transfer = model_transfer.cuda() ###Output _____no_output_____ ###Markdown __Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. 
__Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below. ###Code criterion_transfer = nn.CrossEntropyLoss().cuda() if torch.cuda.is_available() else nn.CrossEntropyLoss() optimizer_transfer = optim.Adam(model_scratch.parameters()) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`. ###Code # train the model model_transfer = train(3, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, True, 'model_transfer.pt') # load the model that got the best validation accuracy (uncomment the line below) #model_transfer.load_state_dict(torch.load('model_transfer.pt')) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%. ###Code test(loaders_transfer, model_transfer, criterion_transfer, use_cuda) ###Output _____no_output_____ ###Markdown (IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model. ###Code ### TODO: Write a function that takes a path to an image as input ### and returns the dog breed that is predicted by the model. # list of class names by index, i.e. 
a name can be accessed like class_names[0] class_names = [item[4:].replace("_", " ") for item in data_transfer['train'].classes] def predict_breed_transfer(img_path): # load the image and return the predicted breed return None ###Output _____no_output_____ ###Markdown --- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `human_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience!![Sample Human Output](images/sample_human_output.png) (IMPLEMENTATION) Write your Algorithm ###Code ### TODO: Write your algorithm. ### Feel free to use as many code cells as needed. def run_app(img_path): ## handle cases for a human face, dog, and neither ###Output _____no_output_____ ###Markdown --- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. 
__Answer:__ (Three possible points for improvement) ###Code ## TODO: Execute your algorithm from Step 6 on ## at least 6 images on your computer. ## Feel free to use as many code cells as needed. ## suggested code, below for file in np.hstack((human_files[:3], dog_files[:3])): run_app(file) ###Output _____no_output_____
2.2) CNN Models - Test Cases.ipynb
###Markdown 2.2 CNN Models - Test CasesThe trained CNN model was performed to a hold-out test set with 10,873 images.The network obtained 0.743 and 0.997 AUC-PRC on the hold-out test set for cored plaque and diffuse plaque respectively. ###Code import time, os import torch torch.manual_seed(42) from torch.autograd import Variable import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.optim import lr_scheduler import torchvision from torchvision import transforms from matplotlib import pyplot as plt import numpy as np import pandas as pd CSV_DIR = 'data/CSVs/test.csv' MODEL_DIR = 'models/CNN_model_parameters.pkl' IMG_DIR = 'data/tiles/hold-out/' NEGATIVE_DIR = 'data/seg/negatives/' SAVE_DIR = 'data/outputs/' if not os.path.exists(SAVE_DIR): os.makedirs(SAVE_DIR) batch_size = 32 num_workers = 8 norm = np.load('utils/normalization.npy').item() from torch.utils.data import Dataset from PIL import Image class MultilabelDataset(Dataset): def __init__(self, csv_path, img_path, transform=None): """ Args: csv_path (string): path to csv file img_path (string): path to the folder where images are transform: pytorch transforms for transforms and tensor conversion """ self.data_info = pd.read_csv(csv_path) self.img_path = img_path self.transform = transform c=torch.Tensor(self.data_info.loc[:,'cored']) d=torch.Tensor(self.data_info.loc[:,'diffuse']) a=torch.Tensor(self.data_info.loc[:,'CAA']) c=c.view(c.shape[0],1) d=d.view(d.shape[0],1) a=a.view(a.shape[0],1) self.raw_labels = torch.cat([c,d,a], dim=1) self.labels = (torch.cat([c,d,a], dim=1)>0.99).type(torch.FloatTensor) def __getitem__(self, index): # Get label(class) of the image based on the cropped pandas column single_image_label = self.labels[index] raw_label = self.raw_labels[index] # Get image name from the pandas df single_image_name = str(self.data_info.loc[index,'imagename']) # Open image try: img_as_img = Image.open(self.img_path + single_image_name) except: img_as_img = 
Image.open(NEGATIVE_DIR + single_image_name) # Transform image to tensor if self.transform is not None: img_as_img = self.transform(img_as_img) # Return image and the label return (img_as_img, single_image_label, raw_label, single_image_name) def __len__(self): return len(self.data_info.index) data_transforms = { 'test' : transforms.Compose([ transforms.ToTensor(), transforms.Normalize(norm['mean'], norm['std']) ]) } image_datasets = {'test': MultilabelDataset(CSV_DIR, IMG_DIR, data_transforms['test'])} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=False, num_workers=num_workers) for x in ['test']} dataset_sizes = {x: len(image_datasets[x]) for x in ['test']} image_classes = ['cored','diffuse','CAA'] use_gpu = torch.cuda.is_available() def imshow(inp, title=None): """Imshow for Tensor.""" inp = inp.numpy().transpose((1, 2, 0)) mean = np.array(norm['mean']) std = np.array(norm['std']) inp = std * inp + mean inp = np.clip(inp, 0, 1) plt.figure() plt.imshow(inp) if title is not None: plt.title(title) plt.pause(0.001) # pause a bit so that plots are updated # Get a batch of training data inputs, labels, raw_labels, names = next(iter(dataloaders['test'])) # Make a grid from batch out = torchvision.utils.make_grid(inputs) imshow(out) class Net(nn.Module): def __init__(self, fc_nodes=512, num_classes=3, dropout=0.5): super(Net, self).__init__() def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.classifier(x) return x def dev_model(model, criterion, phase='test', gpu_id=None): phase = phase since = time.time() dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=False, num_workers=num_workers) for x in [phase]} model.train(False) running_loss = 0.0 running_corrects = torch.zeros(len(image_classes)) running_preds = torch.Tensor(0) running_predictions = torch.Tensor(0) running_labels = torch.Tensor(0) running_raw_labels = torch.Tensor(0) # Iterate over 
data. step = 0 for data in dataloaders[phase]: step += 1 # get the inputs inputs, labels, raw_labels, names = data running_labels = torch.cat([running_labels, labels]) running_raw_labels = torch.cat([running_raw_labels, raw_labels]) # wrap them in Variable if use_gpu: inputs = Variable(inputs.cuda(gpu_id)) labels = Variable(labels.cuda(gpu_id)) else: inputs, labels = Variable(inputs), Variable(labels) # forward outputs = model(inputs) preds = F.sigmoid(outputs) #posibility for each class #print(preds) if use_gpu: predictions = (preds>0.5).type(torch.cuda.FloatTensor) else: predictions = (preds>0.5).type(torch.FloatTensor) loss = criterion(outputs, labels) preds = preds.data.cpu() predictions = predictions.data.cpu() labels = labels.data.cpu() # statistics running_loss += loss.data[0] running_corrects += torch.sum(predictions==labels, 0).type(torch.FloatTensor) running_preds = torch.cat([running_preds, preds]) running_predictions = torch.cat([running_predictions, predictions]) epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = running_corrects / dataset_sizes[phase] print('{} Loss: {:.4f}\n Cored: {:.4f} Diffuse: {:.4f} CAA: {:.4f}'.format( phase, epoch_loss, epoch_acc[0], epoch_acc[1], epoch_acc[2])) print() time_elapsed = time.time() - since print('Prediction complete in {:.0f}m {:.0f}s'.format( time_elapsed // 60, time_elapsed % 60)) return epoch_acc, running_preds, running_predictions, running_labels from sklearn.metrics import roc_curve, auc, precision_recall_curve def plot_roc(preds, label, image_classes, size=20, path=None): colors = ['pink','c','deeppink', 'b', 'g', 'm', 'y', 'r', 'k'] fig = plt.figure(figsize=(1.2*size, size)) ax = plt.axes() for i in range(preds.shape[1]): fpr, tpr, _ = roc_curve(label[:,i].ravel(), preds[:,i].ravel()) lw = 0.2*size # Plot all ROC curves ax.plot([0, 1], [0, 1], 'k--', lw=lw, label='random') ax.plot(fpr, tpr, label='ROC-curve of {}'.format(image_classes[i])+ '( area = {0:0.3f})' ''.format(auc(fpr, tpr)), 
color=colors[(i+preds.shape[1])%len(colors)], linewidth=lw) ax.set_xlim([0.0, 1.0]) ax.set_ylim([0.0, 1.05]) ax.set_xlabel('False Positive Rate', fontsize=1.8*size) ax.set_ylabel('True Positive Rate', fontsize=1.8*size) ax.set_title('Receiver operating characteristic Curve', fontsize=1.8*size, y=1.01) ax.legend(loc=0, fontsize=1.5*size) ax.xaxis.set_tick_params(labelsize=1.6*size, size=size/2, width=0.2*size) ax.yaxis.set_tick_params(labelsize=1.6*size, size=size/2, width=0.2*size) if path != None: fig.savefig(path) # plt.close(fig) print('saved') def plot_prc(preds, label, image_classes, size=20, path=None): colors = ['pink','c','deeppink', 'b', 'g', 'm', 'y', 'r', 'k'] fig = plt.figure(figsize=(1.2*size,size)) ax = plt.axes() for i in range(preds.shape[1]): rp = (label[:,i]>0).sum()/len(label) precision, recall, _ = precision_recall_curve(label[:,i].ravel(), preds[:,i].ravel()) lw=0.2*size ax.plot(recall, precision, label='PR-curve of {}'.format(image_classes[i])+ '( area = {0:0.3f})' ''.format(auc(recall, precision)), color=colors[(i+preds.shape[1])%len(colors)], linewidth=lw) ax.plot([0, 1], [rp, rp], 'k--', color=colors[(i+preds.shape[1])%len(colors)], lw=lw, label='random') ax.set_xlim([0.0, 1.0]) ax.set_ylim([0.0, 1.05]) ax.set_xlabel('Recall', fontsize=1.8*size) ax.set_ylabel('Precision', fontsize=1.8*size) ax.set_title('Precision-Recall curve', fontsize=1.8*size, y=1.01) ax.legend(loc="lower left", bbox_to_anchor=(0.01, 0.1), fontsize=1.5*size) ax.xaxis.set_tick_params(labelsize=1.6*size, size=size/2, width=0.2*size) ax.yaxis.set_tick_params(labelsize=1.6*size, size=size/2, width=0.2*size) if path != None: fig.savefig(path) # plt.close(fig) print('saved') def auc_roc(preds, label): aucroc = [] for i in range(preds.shape[1]): fpr, tpr, _ = roc_curve(label[:,i].ravel(), preds[:,i].ravel()) aucroc.append(auc(fpr, tpr)) return aucroc def auc_prc(preds, label): aucprc = [] for i in range(preds.shape[1]): precision, recall, _ = 
precision_recall_curve(label[:,i].ravel(), preds[:,i].ravel()) aucprc.append(auc(recall, precision)) return aucprc criterion = nn.MultiLabelSoftMarginLoss(size_average=False) model = torch.load(MODEL_DIR, map_location=lambda storage, loc: storage) if use_gpu: model = model.module.cuda() # take 10s running on single GPU try: acc, pred, prediction, target = dev_model(model.module, criterion, phase='test', gpu_id=None) except: acc, pred, prediction, target = dev_model(model, criterion, phase='test', gpu_id=None) label = target.numpy() preds = pred.numpy() output = {} for i in range(3): fpr, tpr, _ = roc_curve(label[:,i].ravel(), preds[:,i].ravel()) precision, recall, _ = precision_recall_curve(label[:,i].ravel(), preds[:,i].ravel()) output['{} fpr'.format(image_classes[i])] = fpr output['{} tpr'.format(image_classes[i])] = tpr output['{} precision'.format(image_classes[i])] = precision output['{} recall'.format(image_classes[i])] = recall outcsv = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in output.items() ])) outcsv.to_csv(SAVE_DIR+'CNN_test_output.csv', index=False) plot_roc(pred.numpy(), target.numpy(), image_classes, size=30) plot_prc(pred.numpy(), target.numpy(), image_classes, size=30) ###Output _____no_output_____ ###Markdown 2.2 CNN Models - Test CasesThe trained CNN model was performed to a hold-out test set with 10,873 images.The network obtained 0.743 and 0.997 AUC-PRC on the hold-out test set for cored plaque and diffuse plaque respectively. 
###Code import time, os import torch torch.manual_seed(42) from torch.autograd import Variable import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.optim import lr_scheduler import torchvision from torchvision import transforms from matplotlib import pyplot as plt import numpy as np import pandas as pd CSV_DIR = 'data/CSVs/test.csv' MODEL_DIR = 'models/CNN_model_parameters.pkl' IMG_DIR = 'data/tiles/hold-out/' NEGATIVE_DIR = 'data/seg/negatives/' SAVE_DIR = 'data/outputs/' if not os.path.exists(SAVE_DIR): os.makedirs(SAVE_DIR) batch_size = 32 num_workers = 8 norm = np.load('utils/normalization.npy', allow_pickle=True).item() from torch.utils.data import Dataset from PIL import Image class MultilabelDataset(Dataset): def __init__(self, csv_path, img_path, transform=None): """ Args: csv_path (string): path to csv file img_path (string): path to the folder where images are transform: pytorch transforms for transforms and tensor conversion """ self.data_info = pd.read_csv(csv_path) self.img_path = img_path self.transform = transform c=torch.Tensor(self.data_info.loc[:,'cored']) d=torch.Tensor(self.data_info.loc[:,'diffuse']) a=torch.Tensor(self.data_info.loc[:,'CAA']) c=c.view(c.shape[0],1) d=d.view(d.shape[0],1) a=a.view(a.shape[0],1) self.raw_labels = torch.cat([c,d,a], dim=1) self.labels = (torch.cat([c,d,a], dim=1)>0.99).type(torch.FloatTensor) def __getitem__(self, index): # Get label(class) of the image based on the cropped pandas column single_image_label = self.labels[index] raw_label = self.raw_labels[index] # Get image name from the pandas df single_image_name = str(self.data_info.loc[index,'imagename']) # Open image try: img_as_img = Image.open(self.img_path + single_image_name) except: img_as_img = Image.open(NEGATIVE_DIR + single_image_name) # Transform image to tensor if self.transform is not None: img_as_img = self.transform(img_as_img) # Return image and the label return (img_as_img, single_image_label, raw_label, 
single_image_name) def __len__(self): return len(self.data_info.index) data_transforms = { 'test' : transforms.Compose([ transforms.ToTensor(), transforms.Normalize(norm['mean'], norm['std']) ]) } image_datasets = {'test': MultilabelDataset(CSV_DIR, IMG_DIR, data_transforms['test'])} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=False, num_workers=num_workers) for x in ['test']} dataset_sizes = {x: len(image_datasets[x]) for x in ['test']} image_classes = ['cored','diffuse','CAA'] use_gpu = torch.cuda.is_available() def imshow(inp, title=None): """Imshow for Tensor.""" inp = inp.numpy().transpose((1, 2, 0)) mean = np.array(norm['mean']) std = np.array(norm['std']) inp = std * inp + mean inp = np.clip(inp, 0, 1) plt.figure() plt.imshow(inp) if title is not None: plt.title(title) plt.pause(0.001) # pause a bit so that plots are updated # Get a batch of training data inputs, labels, raw_labels, names = next(iter(dataloaders['test'])) # Make a grid from batch out = torchvision.utils.make_grid(inputs) imshow(out) class Net(nn.Module): def __init__(self, fc_nodes=512, num_classes=3, dropout=0.5): super(Net, self).__init__() def forward(self, x): x = self.features(x) x = x.view(x.size(0), -1) x = self.classifier(x) return x def dev_model(model, criterion, phase='test', gpu_id=None): phase = phase since = time.time() dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=False, num_workers=num_workers) for x in [phase]} model.train(False) running_loss = 0.0 running_corrects = torch.zeros(len(image_classes)) running_preds = torch.Tensor(0) running_predictions = torch.Tensor(0) running_labels = torch.Tensor(0) running_raw_labels = torch.Tensor(0) # Iterate over data. 
step = 0 for data in dataloaders[phase]: step += 1 # get the inputs inputs, labels, raw_labels, names = data running_labels = torch.cat([running_labels, labels]) running_raw_labels = torch.cat([running_raw_labels, raw_labels]) # wrap them in Variable if use_gpu: inputs = Variable(inputs.cuda(gpu_id)) labels = Variable(labels.cuda(gpu_id)) else: inputs, labels = Variable(inputs), Variable(labels) # forward outputs = model(inputs) preds = F.sigmoid(outputs) #posibility for each class #print(preds) if use_gpu: predictions = (preds>0.5).type(torch.cuda.FloatTensor) else: predictions = (preds>0.5).type(torch.FloatTensor) loss = criterion(outputs, labels) preds = preds.data.cpu() predictions = predictions.data.cpu() labels = labels.data.cpu() # statistics running_loss += loss.data[0] running_corrects += torch.sum(predictions==labels, 0).type(torch.FloatTensor) running_preds = torch.cat([running_preds, preds]) running_predictions = torch.cat([running_predictions, predictions]) epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = running_corrects / dataset_sizes[phase] print('{} Loss: {:.4f}\n Cored: {:.4f} Diffuse: {:.4f} CAA: {:.4f}'.format( phase, epoch_loss, epoch_acc[0], epoch_acc[1], epoch_acc[2])) print() time_elapsed = time.time() - since print('Prediction complete in {:.0f}m {:.0f}s'.format( time_elapsed // 60, time_elapsed % 60)) return epoch_acc, running_preds, running_predictions, running_labels from sklearn.metrics import roc_curve, auc, precision_recall_curve def plot_roc(preds, label, image_classes, size=20, path=None): colors = ['pink','c','deeppink', 'b', 'g', 'm', 'y', 'r', 'k'] fig = plt.figure(figsize=(1.2*size, size)) ax = plt.axes() for i in range(preds.shape[1]): fpr, tpr, _ = roc_curve(label[:,i].ravel(), preds[:,i].ravel()) lw = 0.2*size # Plot all ROC curves ax.plot([0, 1], [0, 1], 'k--', lw=lw, label='random') ax.plot(fpr, tpr, label='ROC-curve of {}'.format(image_classes[i])+ '( area = {0:0.3f})' ''.format(auc(fpr, tpr)), 
color=colors[(i+preds.shape[1])%len(colors)], linewidth=lw) ax.set_xlim([0.0, 1.0]) ax.set_ylim([0.0, 1.05]) ax.set_xlabel('False Positive Rate', fontsize=1.8*size) ax.set_ylabel('True Positive Rate', fontsize=1.8*size) ax.set_title('Receiver operating characteristic Curve', fontsize=1.8*size, y=1.01) ax.legend(loc=0, fontsize=1.5*size) ax.xaxis.set_tick_params(labelsize=1.6*size, size=size/2, width=0.2*size) ax.yaxis.set_tick_params(labelsize=1.6*size, size=size/2, width=0.2*size) if path != None: fig.savefig(path) # plt.close(fig) print('saved') def plot_prc(preds, label, image_classes, size=20, path=None): colors = ['pink','c','deeppink', 'b', 'g', 'm', 'y', 'r', 'k'] fig = plt.figure(figsize=(1.2*size,size)) ax = plt.axes() for i in range(preds.shape[1]): rp = (label[:,i]>0).sum()/len(label) precision, recall, _ = precision_recall_curve(label[:,i].ravel(), preds[:,i].ravel()) lw=0.2*size ax.plot(recall, precision, label='PR-curve of {}'.format(image_classes[i])+ '( area = {0:0.3f})' ''.format(auc(recall, precision)), color=colors[(i+preds.shape[1])%len(colors)], linewidth=lw) ax.plot([0, 1], [rp, rp], 'k--', color=colors[(i+preds.shape[1])%len(colors)], lw=lw, label='random') ax.set_xlim([0.0, 1.0]) ax.set_ylim([0.0, 1.05]) ax.set_xlabel('Recall', fontsize=1.8*size) ax.set_ylabel('Precision', fontsize=1.8*size) ax.set_title('Precision-Recall curve', fontsize=1.8*size, y=1.01) ax.legend(loc="lower left", bbox_to_anchor=(0.01, 0.1), fontsize=1.5*size) ax.xaxis.set_tick_params(labelsize=1.6*size, size=size/2, width=0.2*size) ax.yaxis.set_tick_params(labelsize=1.6*size, size=size/2, width=0.2*size) if path != None: fig.savefig(path) # plt.close(fig) print('saved') def auc_roc(preds, label): aucroc = [] for i in range(preds.shape[1]): fpr, tpr, _ = roc_curve(label[:,i].ravel(), preds[:,i].ravel()) aucroc.append(auc(fpr, tpr)) return aucroc def auc_prc(preds, label): aucprc = [] for i in range(preds.shape[1]): precision, recall, _ = 
precision_recall_curve(label[:,i].ravel(), preds[:,i].ravel()) aucprc.append(auc(recall, precision)) return aucprc criterion = nn.MultiLabelSoftMarginLoss(size_average=False) model = torch.load(MODEL_DIR, map_location=lambda storage, loc: storage) if use_gpu: model = model.module.cuda() # take 10s running on single GPU try: acc, pred, prediction, target = dev_model(model.module, criterion, phase='test', gpu_id=None) except: acc, pred, prediction, target = dev_model(model, criterion, phase='test', gpu_id=None) label = target.numpy() preds = pred.numpy() output = {} for i in range(3): fpr, tpr, _ = roc_curve(label[:,i].ravel(), preds[:,i].ravel()) precision, recall, _ = precision_recall_curve(label[:,i].ravel(), preds[:,i].ravel()) output['{} fpr'.format(image_classes[i])] = fpr output['{} tpr'.format(image_classes[i])] = tpr output['{} precision'.format(image_classes[i])] = precision output['{} recall'.format(image_classes[i])] = recall outcsv = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in output.items() ])) outcsv.to_csv(SAVE_DIR+'CNN_test_output.csv', index=False) plot_roc(pred.numpy(), target.numpy(), image_classes, size=30) plot_prc(pred.numpy(), target.numpy(), image_classes, size=30) ###Output _____no_output_____
pytorch/170912-tutorial.ipynb
###Markdown CIFAR-10 ###Code import torch import torchvision import torchvision.transforms as transforms transform = transforms.Compose( [transforms.ToTensor(), # [0, 1] => [-1, 1] transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] ) trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2) testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2) classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') %matplotlib inline import matplotlib.pyplot as plt import numpy as np def imshow(img): img = img / 2 + 0.5 npimg = img.numpy() plt.imshow(np.transpose(npimg, (1, 2, 0))) dataiter = iter(trainloader) images, labels = dataiter.next() print(images.size(), labels.size()) imshow(torchvision.utils.make_grid(images)) print(' '.join('%5s' % classes[labels[j]] for j in range(4))) # Define a CNN from torch.autograd import Variable import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) # ใƒ‘ใƒฉใƒกใƒผใ‚ฟใŒใชใ„ใƒฌใ‚คใƒคใ‚‚OK๏ผŸ self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) # softmaxใฏๅซใ‚ใชใ„ return x net = Net() net import torch.optim as optim criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) for epoch in range(2): running_loss = 0.0 for i, data in enumerate(trainloader, 0): inputs, labels = data # minibatch # 
ใƒ‡ใƒผใ‚ฟใฏVariableใงๅ›ฒใ‚€ inputs, labels = Variable(inputs), Variable(labels) # ๅ„ใƒŸใƒ‹ใƒใƒƒใƒใ”ใจใซ่“„็ฉใ—ใŸๅ‹พ้…ใฏใƒชใ‚ปใƒƒใƒˆใ™ใ‚‹ # ใƒŸใƒ‹ใƒใƒƒใƒๅ˜ไฝใงใƒ‘ใƒฉใƒกใƒผใ‚ฟๆ›ดๆ–ฐใ™ใ‚‹ใŸใ‚ optimizer.zero_grad() outputs = net(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() running_loss += loss.data[0] if i % 2000 == 1999: # 2000 minibatchใ”ใจใซ่กจ็คบ print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000)) running_loss = 0.0 print('Finished Training') dataiter = iter(testloader) images, labels = dataiter.next() imshow(torchvision.utils.make_grid(images)) print('Ground Truth:', ' '.join('%5s' % classes[labels[j]] for j in range(4))) outputs = net(Variable(images)) outputs _, predicted = torch.max(outputs.data, 1) predicted[1].numpy()[0] print('Predicted: ', ' '.join('%5s' % classes[predicted[j].numpy()[0]] for j in range(4))) correct = 0 total = 0 for data in testloader: images, labels = data outputs = net(Variable(images)) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum() print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total)) class_correct = list(0. for i in range(10)) class_total = list(0. for i in range(10)) for data in testloader: images, labels = data outputs = net(Variable(images)) _, predicted = torch.max(outputs.data, 1) c = (predicted == labels).squeeze() for i in range(4): label = labels[i] class_correct[label] += c[i] class_total[label] += 1 for i in range(10): print('Accuracy of %5s : %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i])) ###Output Accuracy of plane : 74 % Accuracy of car : 65 % Accuracy of bird : 42 % Accuracy of cat : 12 % Accuracy of deer : 43 % Accuracy of dog : 70 % Accuracy of frog : 70 % Accuracy of horse : 47 % Accuracy of ship : 46 % Accuracy of truck : 61 %
J4A Notebook 3 - Widgets & Querying an API.ipynb
###Markdown Jupyter for Analysts - Notebook 3 Welcome to Jupyter for Analysts Notebook 3!! So by now you have queried nbgallery for notebooks, interacted with code in notebooks, found a notebook on nbgallery, and ran at least 2 notebooks! This is seriously AMAZING progress and you should be incredibly proud. Being able to use Jupyter is a huge advantage for you! We promise, there will be notebooks out there that you will want to use for your work! :) This notebook is going to dive in to more interaction with the code you are running. We will be querying ---service --- using 'widgets' . You have actually already seen a widget but probably didn't notice. That is perfectly OK! We are going to show you a couple more widgets you might see in notebooks. Let's start by looking at some basic widgets. Run the new few cells below to see widgets in action! Ok. First step, let's import the widgets in to this notebook. ###Code from IPython import display as disp from IPython.display import display, HTML from ipywidgets import widgets, Layout better_layout = {'height' : '100px', 'width':'75%' } ###Output _____no_output_____ ###Markdown Now start running the code below to play with widgets!The first one you will see is the string widget. Notice that there is a textbox for you to type in. Go ahead type something (anything you want) in the box and watch the text JUST below the box change! :) Don't worry about the actual output here. ###Code from ipywidgets import * def f(x): print(x) interact(f, x="Type stuff here") ###Output _____no_output_____ ###Markdown Thank u, next. Ok, run the code below to see a 'progress bar' widget in action. This particular progress bar is just counting up to a number, but in other notebooks this could be used to show you how much longer you have until your results are ready! Notice how it takes until the progress bar is done for the asterisk to turn into a number! 
###Code import time progress_bar = widgets.FloatProgress(min=0.0, max=2.0) display(progress_bar) x = 0 while x != 2: x+=.25 progress_bar.value = x time.sleep(1) print("Complete!") ###Output _____no_output_____ ###Markdown As I'm writing this notebook, there is a big Oreo cookie discussion going on in the office. (How much cream is there in the regular, double, mega, and most stufed? Is there a ratio?) Sorry - I sidetracked. But now I am hungry for cookies! How many should I eat? Run the code below and let me know using the slider widget! ###Code widgets.FloatSlider(min=0, max=10, step=0.5,description='Number of Cookies' ) ###Output _____no_output_____ ###Markdown Okay... that's fine, but what if you let me choose a range of the number of cookies I could eat? (Run the next cell!) ###Code widgets.FloatRangeSlider(min=0, max=10, step=0.5, description='Cookie Range') ###Output _____no_output_____ ###Markdown Do you think I *actually* ate within the range of cookies you gave me? Run the next cell, and check True or False. Note: You do not have to run this cell again once you click in the check box. Just run the cell after that to see my response! ###Code f = widgets.Checkbox(description='False?:', value=False) t = widgets.Checkbox(description='True?:', value=True) display(f, t) if f.value==True: print("Are you sure?") else: print("Yeah.... probably :)") ###Output _____no_output_____ ###Markdown Exercise 4: Who's in space right now? Querying an API Take a stab at running the code below, which is broken up in to a few cells. This code goes to ---service--- and outputs the information you want (using widgets!)Okay... see how far you make it on your own. YOU CAN DO IT! Note: Not every cell will return output. Wait for the asterisk to go away, then run the next cell! 
###Code #imports a library and requests access to the Open Notify API from NASA import requests people = requests.get('http://api.open-notify.org/astros.json') people = people.json() #creates widgets to prompt user for information they want from the Open Notify API name = widgets.Checkbox(description='Full Name', value=True) space_craft = widgets.Checkbox(description='Space Craft', value=True) total_people = widgets.Checkbox(description='Total Number of People in Space', value=True) #displays the widgets display(name, space_craft, total_people) #checks to see if astronaut name was picked. if it was picked, then it tells you name of astronauts currently in space if name.value==True: for person in people['people']: print("Astronaut:",person['name']) #prints a blank line print() #checks to see if space craft name was picked. if it was picked, then it tells you name of #space craft currently in space if space_craft.value == True: for person in people['people']: print("Space Craft for", person['name'], ":", person['craft']) #prints a blank line print() #checks to see if total number of people in space was picked. if it was picked, then it tells you the total number of #astronauts currently in space if total_people.value == True: print("Total Number of People in Space:", people['number']) #checks to see if no value was selected. if no value selected, an error message is printed #to ask user to select a value if total_people.value==False and space_craft.value==False and name.value==False: print("Please check an item in the widget box above before running this code cell.") ###Output _____no_output_____
scripts/notebooks/halo/GM_merger_tree_Check.ipynb
###Markdown Draw merger tree using GalaxyMaker + ConsistenTree2015. 12. 01 Functionally OK. Looks ugly: displacement dx should be more adaptive. ###Code import tree.ctutils as ctu def link_circle_up(x, y, r, ax, finish=0): """ Given two points, draw circle at the first point and link it to the second point without drawing the second point by default (so that it can repeat to build a long thread of bids). for the last point, pass the radius of the last circle to the argument 'finish' For example, fig = plt.figure() ax = fig.add_subplot(111) xpos = [1,1] & ypos = [2,4] link_circle(xpos, ypos, 10, ax) xpos = [1,2] & ypos = [4,6] link_circle(xpos, ypos, 30, ax, finish=30) fig.show() """ ax.plot(x[0], y[0], 'o', ms=r, lw=2, alpha=0.7, mfc='orange') ax.plot(x, y, '-', c='black',alpha=0.7) if finish > 0: ax.plot(x[1], y[1], 'o', ms=20, lw=2, alpha=0.7, mfc='orange') def get_xarr(n): import numpy as np arr=[] a=0 for i in range(n): a += (-1)**i * i arr.append(a) return np.asarray(arr) def recursive_tree(idx, tt, nstep, ax, x0, y0, dx, mass_unit=1e10): import tree.draw_merger_tree as dmt prgs = ctu.get_progenitors(tt, idx) i_this_gal = np.where(tt['id'] == idx) m = np.sqrt(tt[i_this_gal]["mvir"] / mass_unit) #print("IDX:", idx, "prgs: ",prgs, "mass:", m, i_this_gal) nprg = len(prgs) if nstep == 0: return else: if nprg == 0: return else: if nprg > 1: #dx *= 1.1 dx += 0.5 # print("Branch!", nprg) #xarr = get_xarr(nprg) * dx + x0 xarr = np.arange(nprg) * dx + x0 for i, x in zip(prgs, xarr): link_circle_up([x0, x], [y0, y0 + 1], m, ax) recursive_tree(i, tt, nstep - 1, ax, x, y0 + 1, dx, mass_unit=mass_unit) from tree import treemodule from tree import treeutils import pickle import numpy as np alltrees = treemodule.CTree() wdir = '/home/hoseung/Work/data/05427/' #wdir = './' is_gal = True if is_gal: # Galaxy tree tree_path = 'GalaxyMaker/Trees/' else: # halo tree tree_path = 'halo/Trees/' load_extended_tree = True if load_extended_tree: try: alltrees = pickle.load(open(wdir + 
tree_path + "extended_tree.pickle", "rb" )) print("Loaded an extended tree") except: load_extended_tree = False if not load_extended_tree: """ info file of each snapshot are required. """ alltrees = treemodule.CTree() alltrees.load(filename= wdir + tree_path + 'tree_0_0_0.dat') # Fix nout ----------------------------------------------------- nout_max = alltrees.data['nout'].max() alltrees.data['nout'] += 187 - nout_max print("------ NOUT fixed") alltrees.data = ctu.augment_tree(alltrees.data, wdir, is_gal=is_gal) print("------ tree data extended") def extract_main_tree(treedata, idx=None, verbose=False): """ Returns a single branch/trunk of tree following only the main progenitors. Works with both alltrees or atree. Search until no progenitor is found. Doesn't matter how long the given tree is. Only earlier snapshots are searched for. """ if idx == None: idx = treedata['id'][0] if verbose: print("No idx is given") print("idx = ", idx) nprg = 1 ind_list=[np.where(treedata['id'] == idx)[0][0]] # main progenitor = mmp. 
while nprg > 0: idx = ctu.get_progenitors(treedata, idx, main=True) # print(idx) ind_list.append(np.where(treedata['id'] == idx[0])[0][0]) nprg = ctu.get_npr(treedata, idx[0]) return treedata[ind_list] import matplotlib.pyplot as plt nout_fi = 187 nout_ini = 30 i_final = np.where(alltrees.data["nout"] == nout_fi) ttt_sub = alltrees.data[i_final] nouts = np.arange(nout_fi - nout_ini + 1) final_gals = ttt_sub['id'] final_gals_org = ttt_sub['Orig_halo_id'] plt.ioff() #figure(figsize=[6,6]) #ax = fig.add_subplot(211) #aexps = np.unique(alltrees.data["aexp"])[:len(nouts)] aexps = np.unique(alltrees.data["aexp"])[:-len(nouts):-1] zreds = ["%.2f" % (1/i -1) for i in aexps] import os if not os.path.isdir(wdir + "mergertrees/"): os.mkdir(wdir + "mergertrees/") for galid in final_gals: #galid = 42216 #galid = 42207 plt.clf() fig, ax = plt.subplots(1,2) fig.set_size_inches([12,6]) sidgal = str(galid).zfill(5) #print(zreds) atree = ctu.extract_a_tree(alltrees.data, galid) mtree = extract_main_tree(atree) ax[0].scatter(atree['aexp'], np.log10(atree['m']), edgecolors='none', alpha=0.3) ax[0].scatter(mtree['aexp'], np.log10(mtree['m']), edgecolors='none', alpha=0.6, facecolors='red') ax[0].set_xlim([0.15,1.1]) ax[0].set_xticks(aexps[0:151:20]) ax[0].set_xticklabels(zreds[0:151:20]) ax[0].set_title(galid) recursive_tree(galid, atree, 150, ax[1], 0, 0, 0.8, mass_unit=2e8) # y axis label (redshift) ax[1].set_ylabel("Redshift") #ax.set_xlim([-0.5,30]) ax[1].set_ylim([-5,155]) ax[1].set_yticks(range(0,151,10)) ax[1].set_yticklabels(zreds[0:151:10]) #plt.yticks(range(0,151,10), zreds[0:151:10]) ax[1].set_title(sidgal + ", " + str(atree[0]['Orig_halo_id'])) #fig.show() plt.savefig(wdir + "mergertrees/" + sidgal + '.png') #plt.close() plt.close() ###Output /usr/local/lib/python3.4/dist-packages/IPython/kernel/__main__.py:42: RuntimeWarning: divide by zero encountered in log10 /usr/local/lib/python3.4/dist-packages/IPython/kernel/__main__.py:43: RuntimeWarning: divide by zero encountered 
in log10 /usr/lib/python3/dist-packages/matplotlib/pyplot.py:412: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_num_figures`). max_open_warning, RuntimeWarning)
Notebooks/LoadApplicationPoints/loadCaseExample.ipynb
###Markdown Tutorial: computing load application points for CPACSThis exercise gives an overview on how to read and write CPACS data using the TiXI library and how the parametric CPACS geometry can be processed using the TiGL API and Open Cascade. The following topics are adressed:- loading CPACS data using [**TiXI**](https://github.com/DLR-SC/tixi)- extracting wing internal structre with [**TiGL API**](https://github.com/DLR-SC/tigl)- geome'try operation using [**TiGL API**](https://github.com/DLR-SC/tigl) functions- geometry operation using [**pythonOCC**](https://github.com/tpaviot/pythonocc)- writing CPACS data using [**TiXI**](https://github.com/DLR-SC/tixi) 1. Load CPACS dataFirst we import the TiXI 3 and [open](http://tixi.sourceforge.net/Doc/group__FileHandling.htmlga748c1c28c6d9ef0c80b9633ecc379672) the file `loadCaseExample.xml`: ###Code from tixi3 import tixi3wrapper # Instantiate TiXI tixi_h = tixi3wrapper.Tixi3() # Open the XML file fname = 'input.xml' error = tixi_h.open(fname) if not error: print('CPACS data set %s opended successfully.'%fname) ###Output CPACS data set input.xml opended successfully. ###Markdown Let's begin with a [schema validation](http://tixi.sourceforge.net/Doc/group__Validation.htmlgacdd3338ad8d7c0a1b8bbd50ec465b93e) before we proceed: ###Code xsd_file = 'cpacs_schema.xsd' error = tixi_h.schemaValidateFromFile(xsd_file) if not error: print('Hooray, the data set is valid to %s and we don\'t have to scold the input data provider :)'%xsd_file) ###Output Hooray, the data set is valid to cpacs_schema.xsd and we don't have to scold the input data provider :) ###Markdown [TiXI](https://github.com/DLR-SC/tixi) privdes an [online documentation](http://tixi.sourceforge.net/Doc/index.html) of the available C functions and a [Wiki](https://github.com/DLR-SC/tixi/wiki) with some examples and further explanations. 
In Python it is convenient to use the [`help()`](https://docs.python.org/3/library/functions.htmlhelp) command to directly get an overview of the implemented functions of the wrapped API. ###Code help(tixi_h) ###Output Help on Tixi3 in module tixi3.tixi3wrapper object: class Tixi3(builtins.object) | Methods defined here: | | __del__(self) | | __init__(self) | Initialize self. See help(type(self)) for accurate signature. | | addBooleanElement(self, parentPath, elementName, boolean) | | addBooleanElementNS(self, parentPath, qualifiedName, namespaceURI, boolean) | | addCpacsHeader(self, name, creator, version, description, cpacsVersion) | | addDoubleAttribute(self, elementPath, attributeName, number, format) | | addDoubleElement(self, parentPath, elementName, number, format) | | addDoubleElementNS(self, parentPath, qualifiedName, namespaceURI, number, format) | | addDoubleListWithAttributes(self, parentPath, listName, childName, childAttributeName, values, format, attributes, nValues) | | addExternalLink(self, parentPath, url, fileFormat) | | addFloatVector(self, parentPath, elementName, vector, numElements, format) | | addHeader(self, toolName, version, authorName) | | addIntegerAttribute(self, elementPath, attributeName, number, format) | | addIntegerElement(self, parentPath, elementName, number, format) | | addIntegerElementNS(self, parentPath, qualifiedName, namespaceURI, number, format) | | addPoint(self, pointParentPath, x, y, z, format) | | addTextAttribute(self, elementPath, attributeName, attributeValue) | | addTextElement(self, parentPath, elementName, text) | | addTextElementAtIndex(self, parentPath, elementName, text, index) | | addTextElementNS(self, parentPath, qualifiedName, namespaceURI, text) | | addTextElementNSAtIndex(self, parentPath, qualifiedName, namespaceURI, text, index) | | checkAttribute(self, elementPath, attributeName) | boolean return values from special return code is coded manually here | | checkDocumentHandle(self) | | 
checkElement(self, elementPath) | boolean return values from special return code is coded manually here | | cleanup(self) | | close(self) | | closeAllDocuments(self) | | create(self, rootElementName) | | createElement(self, parentPath, elementName) | | createElementAtIndex(self, parentPath, elementName, index) | | createElementNS(self, parentPath, qualifiedName, namespaceURI) | | createElementNSAtIndex(self, parentPath, qualifiedName, index, namespaceURI) | | dTDValidate(self, DTDFilename) | | declareNamespace(self, elementPath, namespaceURI, prefix) | | exportDocumentAsString(self) | | getArray(self, arrayPath, elementName, arraySize) | | getArrayDimensionNames(self, arrayPath, dimensionNames_len) | | getArrayDimensionSizes(self, arrayPath, sizes_len) | | getArrayDimensionValues(self, arrayPath, dimension, dimensionValues_len) | | getArrayDimensions(self, arrayPath) | | getArrayElementCount(self, arrayPath, elementName) | | getArrayElementNames(self, arrayPath, elementType) | | getArrayParameterNames(self, arrayPath, parameterNames_len) | | getArrayParameters(self, arrayPath) | | getArrayValue(self, array, dimSize, dimPos, dims) | | getAttributeName(self, elementPath, attrIndex) | | getBooleanAttribute(self, elementPath, attributeName) | | getBooleanElement(self, elementPath) | | getChildNodeName(self, parentElementPath, index) | | getDocumentPath(self) | | getDoubleAttribute(self, elementPath, attributeName) | | getDoubleElement(self, elementPath) | | getFloatVector(self, vectorPath, eNumber) | | getIntegerAttribute(self, elementPath, attributeName) | | getIntegerElement(self, elementPath) | | getNamedChildrenCount(self, elementPath, childName) | | getNodeType(self, nodePath) | | getNumberOfAttributes(self, elementPath) | | getNumberOfChilds(self, elementPath) | | getPoint(self, pointParentPath) | | getPrintMsgFunc(self) | | getTextAttribute(self, elementPath, attributeName) | | getTextElement(self, elementPath) | | getVectorSize(self, vectorPath) | | 
getVersion(self) | | open(self, xmlInputFilename, recursive=False) | | openDocument(self, xmlFilename) | | openDocumentRecursive(self, xmlFilename, oMode) | | openHttp(self, httpURL) | | openString(self, xmlImportString) | | registerNamespace(self, namespaceURI, prefix) | | registerNamespacesFromDocument(self) | | removeAttribute(self, elementPath, attributeName) | | removeElement(self, elementPath) | | removeExternalLinks(self) | | renameElement(self, parentPath, oldName, newName) | | save(self, fileName, recursive=False, remove=False) | Save the main tixi document. | If the document was opened recursively, | * 'recursive' tells to save linked nodes to their respecitve files, too. | * 'remove' tells to remove the links to external files after saving the complete CPACS inclusively all linked content to the main file. | You cannot have 'remove' without 'recursive'. | | saveAndRemoveDocument(self, xmlFilename) | | saveCompleteDocument(self, xmlFilename) | | saveDocument(self, xmlFilename) | | schemaValidateFromFile(self, xsdFilename) | | schemaValidateFromString(self, xsdString) | | schemaValidateWithDefaultsFromFile(self, xsdFilename) | | setCacheEnabled(self, enabled) | | setElementNamespace(self, elementPath, namespaceURI, prefix) | | swapElements(self, element1Path, element2Path) | | uIDCheckDuplicates(self) | | uIDCheckExists(self, uID) | | uIDCheckLinks(self) | | uIDGetXPath(self, uID) | | uIDSetToXPath(self, xPath, uID) | | updateBooleanElement(self, elementPath, boolean) | | updateDoubleElement(self, elementPath, number, format) | | updateFloatVector(self, path, vector, numElements, format) | | updateIntegerElement(self, elementPath, number, format) | | updateTextElement(self, elementPath, text) | | usePrettyPrint(self, usePrettyPrint) | | xPathEvaluateNodeNumber(self, xPathExpression) | | xPathExpressionGetTextByIndex(self, xPathExpression, elementNumber) | | xPathExpressionGetXPath(self, xPathExpression, index) | | xSLTransformationToFile(self, xslFilename, 
resultFilename) | | xSLTransformationToString(self, xslFilename) | | ---------------------------------------------------------------------- | Data descriptors defined here: | | __dict__ | dictionary for instance variables (if defined) | | __weakref__ | list of weak references to the object (if defined) | | ---------------------------------------------------------------------- | Data and other attributes defined here: | | lib = <CDLL 'libtixi3.so', handle 7fffcd164c00> ###Markdown We will now read the required information from the CPACS data set. Since we already know that the data set is valid, we will find all information about the expected data in the [CPACS documentation](https://cpacs.de/pages/documentation.html) (he different ways to read and interpret a schema are explained in [tutorials from this workshop](https://github.com/DLR-SL/CPACS_Seminar/tree/master/HowTos)). The structure of the [`loadApplicationPointSets`](https://www.cpacs.de/documentation/CPACS_loadCases/html/75379068-a51b-aa5b-81fa-b0d3d4e41543.htm) can be represented as the following XSD diagram:![xsdDiagram_loadReferenceLine.png](attachment:xsdDiagram_loadReferenceLine.png)First we want to check whether the optional [`loadReferenceLine`](https://www.cpacs.de/documentation/CPACS_loadCases/html/7a985b67-4f8a-bd38-f9bf-e2e20606e591.htm) exists and import the coordinates of its nodes.*Note: For the sake of simplicity, we make the assumptions that we expect our data in the first `loadApplicationPointSet` and that we can only process relative coordinates of the `loadReferenceLine`. 
Detailed error information (e.g., `else` conditions), as it should be considered in a tool implementation, is neglected as well.* ###Code # Look for the first point set xpath = '/cpacs/vehicles/aircraft/model[1]/analyses/global/loadApplicationPointSets/loadApplicationPointSet[1]' if tixi_h.checkElement(xpath): # Extract the component uID which points to the corresponding component segment componentUID = tixi_h.getTextElement(xpath+'/componentUID') # Check whether a reference line is given xpath += '/loadReferenceLine' if tixi_h.checkElement(xpath): point_list = [] # Read the point list for i in range(tixi_h.getNumberOfChilds(xpath)): # If <eta> is given, then relative coordinates can be expected point_xpath = xpath+'/loadReferencePoint[%i]'%(i+1) if tixi_h.checkElement(point_xpath+'/eta'): # Obligatory elements eta = tixi_h.getDoubleElement(point_xpath+'/eta') xsi = tixi_h.getDoubleElement(point_xpath+'/xsi') referenceUID = tixi_h.getTextElement(point_xpath+'/referenceUID') # <relHeight> is optional, so we set False to indicate that the value is not given if tixi_h.checkElement(point_xpath+'/relHeight'): relHeight = tixi_h.getDoubleElement(point_xpath+'/relHeight') else: relHeight = False point_list.append([eta,xsi,relHeight,referenceUID]) print('Reference axis points:',*point_list, sep = "\n") ###Output Reference axis points: [0.0, 0.3, False, 'D150_iLOADS_W1_CompSeg1'] [0.12, 0.3, 0.5, 'D150_iLOADS_W1_CompSeg1'] [1.0, 0.4, 0.5, 'D150_iLOADS_W1_CompSeg1'] ###Markdown We now extracted a list of points defining a reference line. In a next step we want to compute the intersection of this reference line with a the ribs to specify proper load application points for structural analysis. 2. Intersection of reference line with ribs We will use the TiGL API for the basic geometry handling. The example will furthermore illustrate how to use Opencascade for individual geometry operations in case they are not implemented in TiGL. 
2.1 Using TiGL to extract the geometry of wing, ribs and sparsLet's import `tigl3` and create an instance of TiGL3 class. ###Code from tigl3 import tigl3wrapper # Create instance of TiGL tigl_h = tigl3wrapper.Tigl3() tigl_h.open(tixi_h, '') ###Output _____no_output_____ ###Markdown Load the configuration manager from `tigl3.configuration`: ###Code import tigl3.configuration # Load TiGL configuration manager and uID manager mgr = tigl3.configuration.CCPACSConfigurationManager_get_instance() aircraft_config = mgr.get_configuration(tigl_h._handle.value) uid_mgr = aircraft_config.get_uidmanager() ###Output _____no_output_____ ###Markdown Get the wing by its `uID`, the component segment by its index and retrieve the corresponding internal structure: ###Code wing = uid_mgr.get_geometric_component('D150_iLOADS_W1') component_segment = wing.get_component_segment(1) wing_structure = component_segment.get_structure() ###Output _____no_output_____ ###Markdown Now we can extract spars and ribs: ###Code # List of spars spars = [] for i in range(wing_structure.get_spar_segment_count()): spars.append(wing_structure.get_spar_segment(i+1)) # List of rib sets and rib faces rib_sets = [] rib_faces = [] for i in range(wing_structure.get_ribs_definition_count()): print('reading rib set #%i ...'%(i+1)) rib_set = wing_structure.get_ribs_definition(i+1) rib_sets.append(rib_set) for j in range(rib_set.get_number_of_ribs()): rib_faces.append(rib_set.get_rib_face(j+1)) print("\nDone with reading %i rib faces from %i rib sets!"%(len(rib_faces),i)) ###Output reading rib set #1 ... reading rib set #2 ... reading rib set #3 ... reading rib set #4 ... reading rib set #5 ... reading rib set #6 ... reading rib set #7 ... Done with reading 31 rib faces from 6 rib sets! ###Markdown Let's plot the result using the Open Cascade viewer. If you want to enter the event loop, i.e. 
using the mouse to modify the view, uncomment `start_display()`: ###Code from OCC.Display.SimpleGui import init_display display, start_display, add_menu, add_function_to_menu = init_display() display.DisplayShape(wing.get_lower_shape(), transparency=0.5, update=True) display.DisplayShape(wing.get_upper_shape(), transparency=0.5, update=True) for spar in spars: display.DisplayShape(spar.get_spar_geometry(), color="blue", update=True) for i, rib_set in enumerate(rib_sets): display.DisplayShape(rib_set.get_ribs_geometry(), color="blue", update=True) # uncomment to enter the event loop # start_display() ###Output INFO:OCC.Display.backend:backend loaded: qt-pyqt5 INFO:OCC.Display.SimpleGui:GUI backend set to: qt-pyqt5 Layer manager created Layer dimensions: 1024, 768 Many colors for color name blue, using first. Many colors for color name blue, using first. Many colors for color name blue, using first. Many colors for color name blue, using first. Many colors for color name blue, using first. Many colors for color name blue, using first. Many colors for color name blue, using first. Many colors for color name blue, using first. Many colors for color name blue, using first. ###Markdown The result should look like this:![ribs_spars1.png](attachment:ribs_spars1.png) 2.2 Converting relative component segment coordinates to absolute coordinates using the TiGL API There is no direct TiGL method to translate the relative component segment coordinates to absolute coordinates. 
But we can use the TiGL API to write our own function: ###Code from OCC.gp import gp_Pnt def get_abs_pnt(eta, xsi, relHeight, compUID): # Get uIDs of the corresponding wing and segment wing_uid, segm_uid = tigl_h.wingComponentSegmentPointGetSegmentEtaXsi(compUID,eta,xsi)[0:2] # Get the wing and segment index from its uID wing_index = tigl_h.wingGetIndex(wing_uid) segm_index = tigl_h.wingGetSegmentIndex(segm_uid)[0] if not relHeight: # TiGL returns absolute point coordinates on the wing chord face at eta, xsi of the segment pnt = tigl_h.wingGetChordPoint(wing_index,segm_index,eta,xsi) else: # Compute the unit normal vector to the chord face chord_normal = np.array(tigl_h.wingGetChordNormal(wing_index, segm_index, eta, xsi)) e = chord_normal/np.linalg.norm(chord_normal) # Get the upper and lower intersection with the wing surface p_up = np.array(tigl_h.wingGetUpperPointAtDirection(wing_index, segm_index, eta, xsi, e[0], e[1], e[2])[0:3]) p_lo = np.array(tigl_h.wingGetLowerPointAtDirection(wing_index, segm_index, eta, xsi, e[0], e[1], e[2])[0:3]) # Translate the relHeight parameter into point coordinates dist = np.linalg.norm(p_up-p_lo) pnt = p_lo + relHeight*dist*e # Return the result as gp_Pnt return gp_Pnt(*pnt) ###Output _____no_output_____ ###Markdown We call the above function for each point in our point list and write the results into a new list `abs_points`: ###Code comp_uid = component_segment.get_uid() abs_points = [] for point in point_list: abs_points.append(get_abs_pnt(*point)) ###Output _____no_output_____ ###Markdown Using the `BRepBuilderAPI_MakeEdge` class we construct edges between the nodes: ###Code from OCC.BRepBuilderAPI import BRepBuilderAPI_MakeEdge edges = [] for i in range(len(abs_points)-1): edges.append(BRepBuilderAPI_MakeEdge(abs_points[i],abs_points[i+1])) ###Output _____no_output_____ ###Markdown Let's plot the `loadReferenceLine` composed of the three point coordinates and the two edges with green color: ###Code for pnt in abs_points: 
display.DisplayShape(pnt, color="green", update=True) for edge in edges: display.DisplayShape(edge.Edge(), color="green", update=True) # uncomment to enter the event loop # start_display() ###Output _____no_output_____ ###Markdown ![ribs_spars2.png](attachment:ribs_spars2.png) 2.3 Intersection of ribs and reference line using pythonOCCWe now have a shape for each of the ribs and the reference line. Next, we will write a routine to combine the edges to a curve and intersect this curve with the ribs. For this we will use the pythonOCC library directly. *Note: Threre is a [API documentation](https://cdn.rawgit.com/tpaviot/pythonocc-core/804f7f3/doc/apidoc/0.18.1/) of pythonOCC. Furthermore, it is recommended to have a look at the [Python demos](https://github.com/tpaviot/pythonocc-demos) or the [C++ documentation](https://www.opencascade.com/doc/occt-6.9.1/refman/html/index.html) from which the Python functions are derived. Again, the `help()` command is very useful to get an overview of the possible member functions of a certain class.*The pythonOCC library usually offers several ways to get to the desired solution. 
In our case, we first combine the edges to a wire with [BRepBuilderAPI_MakeWire](https://cdn.rawgit.com/tpaviot/pythonocc-core/804f7f3/doc/apidoc/0.18.1/OCC.BRepBuilderAPI.html?highlight=brepbuilderapi_makewireOCC.BRepBuilderAPI.BRepBuilderAPI_MakeWire): ###Code from OCC.BRepBuilderAPI import BRepBuilderAPI_MakeWire wire_h = BRepBuilderAPI_MakeWire(edges[0].Edge(), edges[1].Edge()) wire = wire_h.Wire() ###Output _____no_output_____ ###Markdown From this wire we derive the curve in form of a spline with C0 continuity using [Approx_Curve3d](https://cdn.rawgit.com/tpaviot/pythonocc-core/804f7f3/doc/apidoc/0.18.1/OCC.Approx.html?highlight=approx_curve3dOCC.Approx.Approx_Curve3d): ###Code from OCC.BRepAdaptor import BRepAdaptor_CompCurve, BRepAdaptor_HCompCurve from OCC.GeomAbs import GeomAbs_C0 from OCC.Approx import Approx_Curve3d wireAdaptor = BRepAdaptor_CompCurve(wire) curveAdaptor = BRepAdaptor_HCompCurve(wireAdaptor) approx = Approx_Curve3d(curveAdaptor.GetHandle(), 1e-7, GeomAbs_C0, 5, 12) curve = approx.Curve() ###Output _____no_output_____ ###Markdown In the next step we use the [`GeomAPI_IntCS`](https://cdn.rawgit.com/tpaviot/pythonocc-core/804f7f3/doc/apidoc/0.18.1/OCC.GeomAPI.html?highlight=geomapi_intcsOCC.GeomAPI.GeomAPI_IntCS) class to intersect the curve with the rib surfaces. 
Therefore, the rib faces are converted to surfaces via [`BRep_Tool`](https://cdn.rawgit.com/tpaviot/pythonocc-core/804f7f3/doc/apidoc/0.18.1/OCC.BRep.html?highlight=brep_toolOCC.BRep.BRep_Tool): ###Code from OCC.GeomAPI import GeomAPI_IntCS from OCC.BRep import BRep_Tool intersector = GeomAPI_IntCS() intersec_pnts = [] for rib_face in rib_faces: face = BRep_Tool.Surface(rib_face) intersector.Perform(curve, face) for i in range(intersector.NbPoints()): intersec_pnts.append(intersector.Point(i+1)) ###Output _____no_output_____ ###Markdown Let's plot the results as red points: ###Code for pt in intersec_pnts: display.DisplayShape(pt, color="red", update=True) # uncomment to enter the event loop start_display() ###Output _____no_output_____ ###Markdown It should look like this: ![ribs_spars3.png](attachment:ribs_spars3.png) 3. Write load application points to CPACS We have now intersected our reference line with the internal wing structure and thus determined the load application points. Now let's write the results back to CPACS.A look into the [online documentation](https://www.cpacs.de/documentation/CPACS_loadCases/html/378cea43-6e5e-7f71-d037-9cc342ad0a05.htm) reveals the following data structure for this:```XML 1;2;... ..;.. ..;.. ..;.. ```![xsd_loadApplicationPoints.png](attachment:xsd_loadApplicationPoints.png) We could see from the documentation that the [coordinates](https://www.cpacs.de/documentation/CPACS_loadCases/html/8cbb5e0f-58aa-65db-8086-40d31330082d.htm) of the [load application points](https://www.cpacs.de/documentation/CPACS_loadCases/html/bf3688ab-60ba-701f-51d4-a76b2ab062ff.htm) are stored as [stringVectorBaseType](https://www.cpacs.de/documentation/CPACS_loadCases/html/32aea7db-266a-6dfa-5d16-f9b63c1e62a8.htm). 
Therefore we first create corresponding lists in Python:(*Note: documentation links must be adopted to the new proposal once released*) ###Code id_vec, x_vec, y_vec, z_vec = [],[],[],[] for i, pnt in enumerate(intersec_pnts): id_vec.append(i+1) x,y,z = pnt.Coord() x_vec.append(x) y_vec.append(y) z_vec.append(z) ###Output _____no_output_____ ###Markdown Using TiXI we [create](http://tixi.sourceforge.net/Doc/group__Elements.htmlga48de468f8e6b82bafff8465bed229068) the `loadApplicationPoints` element and [add the corresponding vectors](http://tixi.sourceforge.net/Doc/group__Elements.htmlgab3d822acc72ee8e15b5c43140db1de53): ###Code # Create a childe-element 'loadApplicationCoordinates' parentPath = '/cpacs/vehicles/aircraft/model[1]/analyses/global/loadApplicationPointSets/loadApplicationPointSet[1]' tixi_h.createElement(parentPath, 'loadApplicationPoints') # Add coordinate vectors parentPath += '/loadApplicationPoints' tixi_h.addFloatVector(parentPath, 'pointIDs', id_vec, len(id_vec), '%g') tixi_h.addFloatVector(parentPath, 'x', x_vec, len(x_vec), '%.5f') tixi_h.addFloatVector(parentPath, 'y', y_vec, len(y_vec), '%.5f') tixi_h.addFloatVector(parentPath, 'z', z_vec, len(z_vec), '%.5f') ###Output _____no_output_____ ###Markdown Finally the results are [written](http://tixi.sourceforge.net/Doc/group__FileHandling.htmlgaf1bedd335ae49ba7dc69836720b00372) to `output.xml`: ###Code fname = 'output.xml' error = tixi_h.saveDocument(fname) if not error: print("Data written successfully to %s."%fname) ###Output Data written successfully to output.xml.
Chapter 0 - Foundations of Python/Flow control and looping.ipynb
###Markdown Flow control and looping *If...else* statement can be found in most mainstream programming languages to control the flow of your program's execution. With your predescribed conditions, your program is capable of handling different circumstances by corresponding treatment.The basic syntax is: if condition_expression: statement(s) elif condition_expression: statement(s) else: statement(s) Both elif and else are optional and multiple elif is allowed; it also provides short hand syntax for that: if condition_expression: statementor statement if condition_expression else statementRemember it, there is no curly-brackets in Python syntax for that and go checking the indentation if errors happened.Nested *if...else* is allowed that is using *if...else* as a response statement while a particular condition_expression is true.In order to judge the logical conditions from mathematics, Python uses the following symbols:- Equals: a==b- Not equals: a!=b- Less than or equal to: a<=b- Less than: a<b- Greater than or equal to: a>=b- great than: a>bSome logical operators can let you make a complex condition_expression by combining saveral simple ones:- *and* : expression1 and expression2- *or* : expression1 or expression2 ###Code math=4 english=3 if english < 4: print('extra English class') #output>>extra English class # Any subject less than 4 should have extra class #option 1 if english < 4 or math <4: print('extra class') #output>>extra class #option 2 if english<4: print('extra class') elif math<4: print('extra class') #output>>extra class print('extra class') if english<4 or math<4 else print('go playing your video game') #output>>extra class ###Output extra English class extra class extra class extra class ###Markdown for and while Loop for LoopThe for loop is applied for iterating over a iterable objects or traversal like we usually do in C/C++.The basic syntax is: for val in sequence: content of this loop else: statement for else part First of all, *else* part is 
just optional and rarely used.*sequence* can be an iterable object such as list or string, or a range() function.You can generate numbers from 0 to 9 by using range(10) or range(0,10,1) (start=0, end by=10, step size=1) ###Code
# The following three loops come out the same result: print numbers from 0 to 9 sequentially
re=[]
for i in range(10):
    re.append(i)
print(re)

re.clear()
for i in range(0,10,1):
    re.append(i)
print(re)

a=list(range(10))
print(a)

re.clear()
for i in a:
    re.append(i)
print(re)
###Output
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
###Markdown while LoopThe while loop is applied for iterating over a section of code until the testing condition is satisfied (being True). For those cases that you don't know an exact number of iterations for the repeating jobs, while loop might be a good choice.The basic syntax is: while testing_condition: statement(S) for while loop else: statement for else part The condition will be checked first, and any non-zero value would be considered as *True*; also, None and 0 are interpreted as *False*. Same as for loop, the *else* part is optional and only executes once while the testing condition is False. ###Code
re=[]
n=10;i=1
while i<n:
    re.append(i)
    i=i+1 #(can also be expressed by i+=1)
print(re)
###Output
[1, 2, 3, 4, 5, 6, 7, 8, 9]
###Markdown break, continue, and pass*Break* can be applied for both for and while loops; you just put it in the statement (often found with an if statement) and it will redirect your program to the outside of the loop. In contrast to *break*, *continue* will keep your program to stay in the loop. Finally, if you are looking for a statement which is going to do nothing, use *pass*. ###Code
re=[]
for i in range(10):
    re.append(i)
    if i==6:
        print("i becomes",i," now, let's take a break!")
        break
    elif i==3:
        print('i has been 3')
        pass
    else:
        continue
print(re)
###Output
i has been 3
i becomes 6 now, let's take a break!
[0, 1, 2, 3, 4, 5, 6] ###Markdown Iterating through tuple, list, string, and dictionary*enumerate()* is a very useful function we have to introduce in this part. It returns an enumerate object. It contains the index and value of all the items as a tuple. ###Code S='abcd' T=('a','b','c','d') L=['a','b','c','d'] D=dict(zip(range(4),L)) # the following example adopts for sring, list, and tuple # all the outputs are all the same res=[] #Case 1 for i in S: res.append(i) print('Result of Case 1: ', res) res.clear() #Case 2 for i in enumerate(S): res.append(i) print('Result of Case 2: ', res) res.clear() # for dictionary #Case 4 for i in D: res.append((i,D[i])) print('Result of Case 4: ',res) res.clear() #Case 5 for i in enumerate(D): #be carefull! enumerate() will pair keys and index res.append(i) print('Result of Case 5: ',res) res.clear() #Case 6 for i in D.items(): res.append(i) print('Result of Case 6: ',res) ###Output Result of Case 1: ['a', 'b', 'c', 'd'] Result of Case 2: [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'd')] Result of Case 4: [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'd')] Result of Case 5: [(0, 0), (1, 1), (2, 2), (3, 3)] Result of Case 6: [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'd')]
DAY 401 ~ 500/DAY440_[BaekJoon] ๋Œ€์ถฉ ๋”ํ•ด (Python).ipynb
###Markdown 2021๋…„ 8์›” 1์ผ ์ผ์š”์ผ BaekJoon - ๋Œ€์ถฉ ๋”ํ•ด (Python) ๋ฌธ์ œ : https://www.acmicpc.net/problem/8949 ๋ธ”๋กœ๊ทธ : https://somjang.tistory.com/entry/BaekJoon-8949%EB%B2%88-%EB%8C%80%EC%B6%A9-%EB%8D%94%ED%95%B4-Python Solution ###Code def daechung_sum(num1, num2): result = [] num1_len, num2_len = len(num1), len(num2) if num1_len > num2_len: num2 = '0' * (num1_len - num2_len) + num2 else: num1 = '0' * (num2_len - num1_len) + num1 max_len = max([num1_len, num2_len]) for i in range(max_len): result.append(str(int(num1[i]) + int(num2[i]))) return "".join(result) if __name__ == "__main__": num1, num2 = input().split() print(daechung_sum(num1, num2)) ###Output _____no_output_____
work/jupyter/new_feature.ipynb
###Markdown Assignment expresions(PEP572) ใ‚ปใ‚คใ‚ฆใƒๆผ”็ฎ—ๅญ :=- python3.8ใฎๆฆ‚่ฆ๏ผˆใใฎ1๏ผ‰ - Assignment expressions - https://atsuoishimoto.hatenablog.com/entry/2019/09/03/110508- ๅคๆฅใ€pythonใงใฏไปฃๅ…ฅใฏๆ–‡ใงใ‚ใ‚‹ใจใ•ใ‚ŒใŸ - a = 100- ใ‚ปใ‚คใ‚ฆใƒๆผ”็ฎ—ๅญใงใฏใ€ไปฃๅ…ฅใซๆผ”็ฎ—ใ‚’ๅ…ฅใ‚Œใ‚‰ใ‚Œใ‚‹ - a = (b := 50) + 50- ใŒใงใใ‚‹ ###Code a = (b := 50) + 50 print(a) ###Output 100 ###Markdown Positional-only parameters(PEP570) ้–ขๆ•ฐใฎไฝ็ฝฎๅฐ‚็”จๅผ•ๆ•ฐ- python3.8ใฎๆฆ‚่ฆ๏ผˆใใฎ2๏ผ‰ - - https://atsuoishimoto.hatenablog.com/entry/2019/09/06/115651- python3.0ใงใ‚ญใƒผใƒฏใƒผใƒ‰ๅฐ‚็”จๅผ•ๆ•ฐใ‚’"*"ใ‚’ๅˆฉ็”จใ—ใฆๆŒ‡ๅฎšใงใใ‚‹ใ‚ˆใ†ใซ -> 1 - python3.8ใงไฝ็ฝฎๅฐ‚็”จๅผ•ๆ•ฐใ‚’"/"ใ‚’ๅˆฉ็”จใ—ใฆๆŒ‡ๅฎšใงใใ‚‹ใ‚ˆใ†ใซใชใ‚Šใพใ—ใŸ -> 2 ###Code # 1 def func(a, b, *, c=1, d=2): return a + b + c + d func(1,2) func(1,2,10) def func1(a, b, /, *, c, d): return a + b + c + d func1(1,2,10,12) func1(1, b=2) ###Output _____no_output_____ ###Markdown Parallel filesystem cache for compliled bytecode files- ใ‚ใ‹ใฃใŸใ‚‰ f-strings support = for self-documenting expressions and debugging- f-stringใง"="ใŒใ‚ตใƒใƒผใƒˆใ•ใ‚Œใ‚‹ใ‚ˆใ†ใซ - ใ‚ใ‚“ใพ่ชฟในใฆใพใ›ใ‚“ใŒใ€f-string้–ขไฟ‚ใ‚ˆใๅค‰ๆ›ดใ‚ใ‚‹ใชใ- ๅผใฎ็ตๆžœใ‚‚ใ‚ตใƒใƒผใƒˆ ###Code from datetime import date user = "eric_idle" member_since = date(1975, 7,31) f"{user=} {member_since=}" memb_days = date.today() - member_since f"{user=} {memb_days=}" ###Output _____no_output_____ ###Markdown PickleใŒ5ใซ ###Code a = [1,2,3,4,5] for i in reversed(a): print(i) dict_a = {1: "test", 2: "test2", 3: "test3"} for i in reversed(dict_a): print(dict_a[i]) [(10, 20) (30, 40)] cast ={input("role? "): input("actor? ")} cast type(True) ###Output _____no_output_____
2_2_qiskit-quantum-state-classifier_circuits_and_computations.ipynb
###Markdown Classification of quantum states with high dimensional entanglement Circuits and computationsVersion compatible with 1st and 2d pilot studies ###Code import numpy as np import copy from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, Aer, execute, transpile, assemble from qiskit.tools.visualization import * from qiskit.ignis.mitigation.measurement import (complete_meas_cal, tensored_meas_cal, CompleteMeasFitter, TensoredMeasFitter) import json from scipy.signal import savgol_filter import time from qiskit.tools.monitor import job_monitor from o_utils import ora # classifier utilities from o_plot import opl # utilities for result plot from c_utils import new_cut # circuit building utilities def json_dic_loader(dic_name): f = open(data_directory+dic_name+'.json') return json.load(f) ###Output _____no_output_____ ###Markdown markdown for safety on demodef json_dic_dumper(dic, dic_name): with open(data_directory+dic_name+'.json', 'w') as f: json.dump(dic,f) ###Code # common code for calling the classifier for ideal device and for real devices def add_single_dic(target_data_list): start_time = time.time() print("started",time.strftime('%d/%m/%Y %H:%M:%S'),mitig_name, "mitigation",mit_str,o_metric,model_name) # added for D,S,M choice. 
Mainstream : mixed set of 20 states first = 0 last = nb_states if unique_char == "D": last = int(nb_states/2) elif unique_char == "S": first = int(nb_states/2) # get the classifier error curve in function of the number of shot and the "safe shot number" error_curve, safe_rate, ernb = ora.provide_error_curve(PD_model=model_dic[model_name][first:last,:], PD_test=PD_test[first:last,:], trials=trials, window=window, epsilon=epsilon, max_shots=max_shots, pol=pol, verbosality=verbosality) tail = savgol_filter(ernb, window, pol, axis=0) len_curve = len(error_curve) safe_shot_nb = len_curve - int((window-1)/2) # OK print('safe_shot_nb',safe_shot_nb, 'safe_rate',safe_rate, "nb trials:",trials) err_rates = tail[int((window-1)/2),:]/trials err_rate_max = np.max(err_rates) err_rate_min = np.min(err_rates) r=4 print("savgol interpolated error rate mean:", np.round(np.mean(err_rates),r), "min:", np.round(err_rate_min,r), "max:", np.round(err_rate_max,r), "for", [ien for ien, jen in enumerate(err_rates) if jen == err_rate_max]) end_time = time.time() #save the data in a list of dictionaries : single_dic={"project":mitig_name, "id_gates":id_gates, "mitigation":mit_str, "model":model_name, "metric":o_metric, "device":project_device, "curve_length":len_curve, "shots": safe_shot_nb, "shots_rate": safe_rate, "error_curve":error_curve, "trials":trials,"window":window, "epsilon":epsilon,"SG_pol": pol, "computation_time":end_time-start_time, "time_completed":time.strftime('%d/%m/%Y %H:%M:%S'), "trials":trials, "QV": QV_dic[project_device], "fidelity": fidelity_dic[project_device], "error_nb":ernb} target_data_list.append(single_dic) print("completed",time.strftime('%d/%m/%Y %H:%M:%S'),mitig_name, "mitigation",mit_str,o_metric,model_name,"\n") ###Output _____no_output_____ ###Markdown Set up the simulator and layout for 5 qubits ###Code simulator = Aer.get_backend('qasm_simulator') #specify the layout of the devices used_qubits = 5 qubit_list = [0,1,2,3,4] #short_version = False 
#program_name="QAD" # 1st pilot project GHZ Psi+ / W Phi+ program_name="AL2" # 2d pilot project W Psi+ / Wbar Phi+ Flag_char = "DS" # this for a mix of two types of separable states if len(Flag_char) >= 2: unique_char = "M" else: unique_char = Flag_char # These dictionaries for the devices used in the study if program_name == "QAD": fidelity_dic = {'ibmq_athens': 0.925110, 'ibmq_valencia': 0.809101, 'ibmq_ourense': 0.802380, "ibmqx2": 0.627392, 'ibmq_santiago': 0.919399, 'ibmq_vigo': 0.908840, 'ideal_device': 1.0} data_directory = "data_files/" elif program_name == "AL2": fidelity_dic = {'ibmq_athens': 0.910145, 'ibmq_valencia': 0.794262, 'ibmq_ourense': 0.818974, "ibmqx2": 0.359528, 'ibmq_santiago': 0.900024, 'ibmq_vigo': 0.841831, 'ideal_device': 1.0} data_directory = "data2_files/" QV_dic = {'ibmq_athens': 32.0, 'ibmq_valencia': 16.0, 'ibmq_ourense': 8.0, "ibmqx2": 8.0, 'ibmq_santiago': 32.0, 'ibmq_vigo': 16.0, 'ideal_device': np.inf} dev_dic = {'ibmq_santiago': "San",'ibmq_athens': "Ath", 'ibmq_valencia': "Val", 'ibmq_vigo': 'Vig','ibmq_ourense': "Our", "ibmqx2": 'Yor', 'ideal_device': "Ide"} # specify the device: here first the ideal noise-free device project_device = 'ideal_device' device_name = dev_dic[project_device] # specify the nb of id gates between state creation and measurements # zero for the ideal device id_gates = 0 str_nb_id = str(id_gates) zfilled = str_nb_id.zfill(4-len(str_nb_id)) # tail of the file names for RAM storage mitig_name = program_name + "_" + device_name project_name = mitig_name + "_" + unique_char + zfilled print(mitig_name) print(project_name) # establish the result label list # meas_calibs will be used for mitigation in the real device section qr = QuantumRegister(used_qubits) meas_calibs, label_list = complete_meas_cal(qubit_list=qubit_list, qr=qr, circlabel='mcal') nb_labels=len(label_list) print(nb_labels,label_list) len(meas_calibs) # permutation list # here it is simple to write down the list, # but a version using 
itertools will be welcome
def print_first_and_last_row(PDM):
    """Show the first and last rows of a probability-distribution matrix.

    Uses the module-level ``nb_states`` / ``nb_labels`` for the header.
    """
    header = ("first and last rows of the probability distribution matrix"
              " of dimension " + str(nb_states) + "x" + str(nb_labels))
    print(header)
    print(np.round(PDM[:1, :], 4))
    print(" ...")
    print(np.round(PDM[-1:, :], 4))
ideal values from the simulator approximated values with np.errstate(divide='ignore'): # ignore the divide by zero warning PD_ideal = 1/np.round(s_sim/(PD_ideal)) # have a look at the matrix head and tail: print_first_and_last_row(PD_ideal) ###Output first and last rows of the probability distribution matrix of dimension 20x32 [[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.1667 0.1667 0. 0.1667 0. 0. 0. 0. 0.1667 0.1667 0. 0.1667 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ]] ... [[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.1667 0. 0. 0.1667 0. 0. 0. 0. 0.1667 0. 0. 0.1667 0.1667 0. 0. 0.1667 0. 0. 0. 0. ]] ###Markdown Monte Carlo simulation for the ideal device ###Code # here will be appended the data we want for the curve plot ideal_data_list=[] ###Output _____no_output_____ ###Markdown you may skip this cell and get stored curves by running the next cell ###Code # you may want to skip this cell as it will require a long time # because of the high number of trials required by the Monte Carlo simulation for each nb o shots value # the following values are defined in the study summary (readme file): trials=100 # to be set to 10000 if not demo window=5 # shorter window than for the real device counts epsilon = .001 min_shots = 5 max_shots = 100 pol=2 subset = None # variable not used here verbosality = 5 # printing step for intermediate results when increasing the experiment shot number PD_test = PD_ideal mitigation_dic = {"Na": None} o_metrics_desired = ['jensenshannon', 'sqeuclidean'] model_dic = {"ideal_sim": PD_ideal} for mit_str, mitigation in mitigation_dic.items(): if mitigation != None: # thus only for counts on real device PD_exper = get_clean_matrix(empirical_dic, mitigation=mitigation, m_filter=meas_filter) for o_metric in o_metrics_desired: for model_name in model_dic.keys(): add_single_dic(ideal_data_list) ###Output _____no_output_____ ###Markdown markdown for safetyjson_dic_dumper(ideal_data_list,"ideal_device_data_list_"+project_name) ###Code # get the stored results of the Monte 
Carlo simulation in case you skipped the previous step if len(ideal_data_list) == 0: ideal_data_list = json_dic_loader("ideal_device_data_list_"+project_name) # have a look at the mean error rate curves and error rate at save shot number n_s # NB the r_hat_mean curves and legend reported r_hat_max errors the unsmoothed values opl.plot_curves(ideal_data_list,np.array([0,1]), "Jensen-Shannon vs squared euclidean distance - $\epsilon=0.001$" , ["model"], ["device","metric"], right_xlimit = 20, bottom_ylimit = -0.001, top_ylimit = 0.05) ###Output _____no_output_____ ###Markdown Real device section ###Code from qiskit import IBMQ IBMQ.load_account() provider = IBMQ.get_provider(hub='ibm-q') provider.backends() project_device = 'ibmq_valencia'# you may choice here a different backend device_name = dev_dic[project_device] mitig_name = program_name + "_" + device_name print(mitig_name) #determine here the backend device = provider.get_backend(project_device) # the backend names are listed here above properties = device.properties() coupling_map = device.configuration().coupling_map ###Output _____no_output_____ ###Markdown obtain mitigation filter markdown for demonb_shots_cal = 8192 set here the number of shots for the calibration phaseprint("backend:", device.name(), "qubit_list:", qubit_list)job_cal = execute(meas_calibs, backend=device, shots=nb_shots_cal)print(job_cal.job_id())job_monitor(job_cal)time_exp = time.strftime('%d/%m/%Y %H:%M:%S')print("DMY: ",time_exp) markdown for demohere we save mitigation resultscal_results = job_cal.result()cal_results_dic = cal_results.to_dict()to make date in dictionary serializable if there is a 'date' key:if 'date' in cal_results_dic.keys(): cal_results_dic['date']=str(cal_results_dic['date']) markdown for demo and securitydumpjson_dic_dumper(cal_results_dic,"cal_results_dic_"+ mitig_name) ###Code # retrieve the corresponding measurement mitigation filter obtained at experimental time # use a fake job because use of the from_dict 
method simulator = Aer.get_backend('qasm_simulator') fake_job_cal = execute(meas_calibs, backend=simulator, shots=1) fake_cal_results = fake_job_cal.result() cal_results_dic = json_dic_loader("cal_results_dic_"+mitig_name) if 'date' in cal_results_dic.keys(): str(cal_results_dic['date']) cal_results = fake_cal_results.from_dict(cal_results_dic) meas_fitter = CompleteMeasFitter(cal_results, label_list, qubit_list=qubit_list, circlabel='mcal') meas_filter = meas_fitter.filter # have a look at the average measurement fidefily of this device: print("Average Measurement Fidelity was: %f" % meas_fitter.readout_fidelity(), "for",project_device) ###Output Average Measurement Fidelity was: 0.794262 for ibmq_valencia ###Markdown Transpile the basic circuits for running on real deviceIn this demo, these are not the circuits which were actually run on real devices (not the same transpiler seed).The optimization level is set to 2 instead of 3 in real experiments, for speed and also because at this moment there is a transpiler error occuring for ibmqx2: 'Maximum iteration reached. 
max_iteration=1000' ###Code id_gates = 0 str_nb_id = str(id_gates) zfilled = str_nb_id.zfill(4-len(str_nb_id)) project_name = mitig_name + "_" + unique_char + zfilled print(project_name) # transpile verbose = True summary_dic = {} seed_transpiler_list = list(range(nb_states)) real_circs = [] start_time = time.strftime('%d/%m/%Y %H:%M:%S') print("Start at DMY: ",start_time) for i_state in list(range(nb_states)): # prepare circuit to be transpiled circuit = copy.deepcopy(circ_ori[i_state]) if id_gates > 0: circuit.barrier() for id_gates_index in range(id_gates): for index, value in enumerate(qubit_list): circuit.id(value) new_cut.add_barrier_and_measure(circuit, qubit_list) summary = [] depth_list = [] Q_state_opt_new = transpile(circuit, backend=device, coupling_map = coupling_map, seed_transpiler=seed_transpiler_list[i_state], optimization_level=2, initial_layout=qubit_list) summary_dic[i_state] = {"depth": Q_state_opt_new.depth(), 'circuit':Q_state_opt_new} real_circs.append(Q_state_opt_new) if verbose: print("circuit %2i" % i_state,"length",summary_dic[i_state]["depth"], "DMY: ",time.strftime('%d/%m/%Y %H:%M:%S')) end_time = time.strftime('%d/%m/%Y %H:%M:%S') print("Completed at DMY: ",end_time) i_state_test = 10 print(project_device, "circuit #",i_state_test, "circuit length:",real_circs[i_state_test].depth()) #summary_dic[i_state_test]['depth']) # you may want to skip this if large nb of id gates before measurement real_circs[i_state_test].draw(output='mpl') #check a circuit on noise-free simulator job_simul = execute(real_circs[i_state_test], backend=simulator, shots=s_sim) print(project_device, "circuit #",i_state_test, "on noise free simulator") plot_histogram(job_simul.result().get_counts(), legend=['noise free simulation'], color = "b", figsize=(10.,5.)) ###Output ibmq_valencia circuit # 10 on noise free simulator ###Markdown run job markdown for demorun the circuitsnb_shots = 8192print("backend:", device.name(), "qubit_list:", qubit_list)time_exp = 
def key_change(ini_dict, i_subset):
    """Return a copy of ``ini_dict`` with keys renumbered for merging.

    The circuits of subset ``i_subset`` are re-keyed as the string indices
    ``i_subset * len(ini_dict)`` .. ``(i_subset + 1) * len(ini_dict) - 1`` so
    that several per-subset count dictionaries can be merged without key
    collisions.  Original keys are discarded; values keep insertion order.
    """
    offset = i_subset * len(ini_dict)
    # dict comprehension replaces the manual list building + zip of the original
    return {str(offset + i): v for i, v in enumerate(ini_dict.values())}
def rectify_counts(tot_res, test_cqi, mitigation, m_filter):
    """Return a complete counts dict for one circuit, optionally mitigated.

    Parameters:
        tot_res: dict of per-circuit count dictionaries, keyed by circuit
            index as str or int.
        test_cqi: circuit index to look up.
        mitigation: if True, apply measurement-error mitigation.
        m_filter: measurement filter providing ``apply`` (e.g. a Qiskit
            Ignis ``MeasurementFilter``); only used when mitigation is True.

    Every one of the 2**used_qubits labels is present in the result, with 0
    for outcomes that were never observed.
    """
    # template with every possible label so missing outcomes count as 0
    void_counts = dict(zip(label_list, np.zeros(2 ** used_qubits)))
    # stored result dictionaries may be keyed by str or by int circuit index
    try:
        measured = tot_res[str(test_cqi)]
    except KeyError:
        measured = tot_res[test_cqi]
    raw_counts = dict(void_counts)
    raw_counts.update(measured)
    if not mitigation:
        return raw_counts
    # BUG FIX: use the m_filter argument — the original ignored it and read
    # the global meas_filter, which only worked because callers happened to
    # pass that same object.
    mitigated = m_filter.apply(raw_counts, method='least_squares')
    returned_counts = dict(void_counts)
    returned_counts.update(mitigated)
    return returned_counts
clean_matrix/clean_matrix.sum(axis=1, keepdims=True) return clean_matrix # We need to create a first matrix version. It will then vary for each considered set of distribution mitigation = False PD_exper = get_clean_matrix(empirical_dic, mitigation=mitigation, m_filter=meas_filter) print_first_and_last_row(PD_exper) if program_name == "QAD": PD_test = copy.deepcopy(PD_exper) elif program_name == "AL2": mitigation = False PD_test = get_clean_matrix(test_dic, mitigation=mitigation, m_filter=meas_filter) print_first_and_last_row(PD_test) ###Output first and last rows of the probability distribution matrix of dimension 20x32 [[0.0046 0.0127 0.0114 0.0004 0.0165 0.0005 0.0004 0.0005 0.0319 0.0939 0.078 0.0017 0.1027 0.0043 0.0055 0.0056 0.0479 0.1379 0.1162 0.0044 0.1602 0.006 0.0068 0.007 0.0161 0.0391 0.032 0.0012 0.0476 0.0023 0.0018 0.0032]] ... [[0.0112 0.0011 0.0031 0.0092 0.014 0.0031 0.0042 0.0157 0.0332 0.0076 0.0089 0.0248 0.141 0.0243 0.0243 0.1394 0.0138 0.0029 0.0023 0.0104 0.0656 0.0112 0.0123 0.0546 0.127 0.0189 0.0214 0.0896 0.0479 0.0081 0.0103 0.0389]] ###Markdown Monte Carlo simulation for the real device ###Code # here will be appended the data we want for the final plot of this notebook empirical_data_list=[] ###Output _____no_output_____ ###Markdown you may want to skip this cell and get stored curves by running the next cell ###Code # you may want to skip this cell as it will require a long time # because of the high number of trials required by the Monte Carlo simulation for each nb o shots value # the following values are defined in the study summary notebook: trials=100 # should be 1000 if not demo window=11 epsilon = .001 max_shots = 500 pol=2 verbosality = 10 # printing step for intermediate results when increasing the experiment shot number # In this section you can easily make your choice of combinations: # mitigation or not, metric, model mitigation_dic = {"no":False, "yes" : True} #mitigation_dic = {"no":False} #mitigation_dic = {"yes" : 
True} o_metrics_desired = ['jensenshannon', 'sqeuclidean'] #o_metrics_desired = ['jensenshannon'] #o_metrics_desired = ['sqeuclidean'] model_dic = {"empirical": PD_exper, "ideal_sim": PD_ideal} #model_dic = {"empirical": PD_exper} #model_dic = {"ideal_sim": PD_ideal} # Obtain a sequence of results in form of a list of dictionaries for mit_str, mitigation in mitigation_dic.items(): # here we toggle PD_exper as we toggled mitigation status PD_exper = get_clean_matrix(empirical_dic, mitigation=mitigation, m_filter=meas_filter) PD_test = get_clean_matrix(test_dic, mitigation=mitigation, m_filter=meas_filter) for o_metric in o_metrics_desired: print(project_name, model_dic.keys(), o_metric) for model_name in model_dic.keys(): add_single_dic(empirical_data_list) ###Output _____no_output_____ ###Markdown markdown fo securityjson_dic_dumper(empirical_data_list,'Tnemp_data_list_'+project_name) ###Code # get the stored results of the Monte Carlo simulation in case you skipped the previous step if len(empirical_data_list) == 0: empirical_data_list = json_dic_loader('Nemp_data_list_'+project_name) # have a look at the mean error rate curves and error rate at save shot number n_s # NB the r_hat_mean curves and legend reported r_hat_max errors are the unsmoothed values opl.plot_curves(ideal_data_list + empirical_data_list, np.array(range(2+len(empirical_data_list))), "$\epsilon=0.001$" , ["device"], ["model","metric","mitigation","id_gates"], right_xlimit = 80, bottom_ylimit = -0.02, top_ylimit = 1) import winsound duration = 2000 # milliseconds freq = 800 # Hz winsound.Beep(freq, duration) import qiskit.tools.jupyter %qiskit_version_table ###Output _____no_output_____ ###Markdown Classification of quantum states with high dimensional entanglement Circuits and computationsVersion compatible with 1st and 2d pilot studies ###Code import numpy as np import copy from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, Aer, execute, transpile, assemble from 
def add_single_dic(target_data_list):
    """Run one Monte Carlo classifier configuration and record its summary.

    Appends a result dictionary to ``target_data_list``.  Reads the
    module-level experiment settings defined by the surrounding notebook
    cells before the call: mitig_name, mit_str, o_metric, model_name,
    model_dic, PD_test, nb_states, unique_char, trials, window, epsilon,
    max_shots, pol, verbosality, id_gates, project_device, QV_dic,
    fidelity_dic — TODO confirm all are set for the chosen configuration.
    """
    start_time = time.time()
    print("started", time.strftime('%d/%m/%Y %H:%M:%S'), mitig_name,
          "mitigation", mit_str, o_metric, model_name)
    # added for D,S,M choice. Mainstream : mixed set of 20 states
    first = 0
    last = nb_states
    if unique_char == "D":
        last = int(nb_states/2)
    elif unique_char == "S":
        first = int(nb_states/2)
    # get the classifier error curve in function of the number of shots
    # and the "safe shot number"
    error_curve, safe_rate, ernb = ora.provide_error_curve(
        PD_model=model_dic[model_name][first:last, :],
        PD_test=PD_test[first:last, :],
        trials=trials,
        window=window,
        epsilon=epsilon,
        max_shots=max_shots,
        pol=pol,
        verbosality=verbosality)
    # smooth the raw error counts with a Savitzky-Golay filter
    tail = savgol_filter(ernb, window, pol, axis=0)
    len_curve = len(error_curve)
    safe_shot_nb = len_curve - int((window-1)/2)  # OK
    print('safe_shot_nb', safe_shot_nb, 'safe_rate', safe_rate,
          "nb trials:", trials)
    err_rates = tail[int((window-1)/2), :]/trials
    err_rate_max = np.max(err_rates)
    err_rate_min = np.min(err_rates)
    r = 4
    print("savgol interpolated error rate mean:", np.round(np.mean(err_rates), r),
          "min:", np.round(err_rate_min, r),
          "max:", np.round(err_rate_max, r),
          "for", [ien for ien, jen in enumerate(err_rates) if jen == err_rate_max])
    end_time = time.time()
    # save the data in a list of dictionaries
    # (BUG FIX: the original dict literal listed the "trials" key twice;
    # the duplicate silently overwrote the first occurrence)
    single_dic = {"project": mitig_name,
                  "id_gates": id_gates,
                  "mitigation": mit_str,
                  "model": model_name,
                  "metric": o_metric,
                  "device": project_device,
                  "curve_length": len_curve,
                  "shots": safe_shot_nb,
                  "shots_rate": safe_rate,
                  "error_curve": error_curve,
                  "trials": trials,
                  "window": window,
                  "epsilon": epsilon,
                  "SG_pol": pol,
                  "computation_time": end_time - start_time,
                  "time_completed": time.strftime('%d/%m/%Y %H:%M:%S'),
                  "QV": QV_dic[project_device],
                  "fidelity": fidelity_dic[project_device],
                  "error_nb": ernb}
    target_data_list.append(single_dic)
    print("completed", time.strftime('%d/%m/%Y %H:%M:%S'), mitig_name,
          "mitigation", mit_str, o_metric, model_name, "\n")
'ibmq_valencia': "Val", 'ibmq_vigo': 'Vig','ibmq_ourense': "Our", "ibmqx2": 'Yor', 'ideal_device': "Ide"} # specify the device: here first the ideal noise-free device project_device = 'ideal_device' device_name = dev_dic[project_device] # specify the nb of id gates between state creation and measurements # zero for the ideal device id_gates = 0 str_nb_id = str(id_gates) zfilled = str_nb_id.zfill(4-len(str_nb_id)) # tail of the file names for RAM storage mitig_name = program_name + "_" + device_name project_name = mitig_name + "_" + unique_char + zfilled print(mitig_name) print(project_name) # establish the result label list # meas_calibs will be used for mitigation in the real device section qr = QuantumRegister(used_qubits) meas_calibs, label_list = complete_meas_cal(qubit_list=qubit_list, qr=qr, circlabel='mcal') nb_labels=len(label_list) print(nb_labels,label_list) len(meas_calibs) # permutation list # here it is simple to write down the list, # but a version using itertools will be wellcome for >5 qubits projects if used_qubits == 5: q_perm = [[0, 1, 2, 3, 4], [0, 1, 3, 2, 4], [0, 1, 4, 2, 3], [0, 2, 3, 1, 4], [0, 2, 4, 1, 3], [0, 3, 4, 1, 2], [1, 2, 3, 0, 4], [1, 2, 4, 0, 3], [1, 3, 4, 0, 2], [2, 3, 4, 0, 1]] else: print("work in progress - meanwhile please provide the list of permutations") ###Output _____no_output_____ ###Markdown Create the quantum states ###Code # define the two subsets of 10 separable states if program_name == "QAD": state_1a = ["W","Phi+"] state_1b = ["GHZ","Psi+"] elif program_name == "ALT" or "AL2": state_1a = ["W","Psi+"] state_1b = ["Wbar","Phi+"] l_states = state_1a+state_1b l_states # version 20 circuits for demonstration # (in the version run on real devices: two batches of 10 circuits, "shallow" and "deep") # these circuits limited to state creation are ready to be saved # for ultimately building circuits adapted to noisy simulator and real devices # as option, these circuits will include a row of id gates between creation and 
measurements circ_ori = [] for i_s in range(0,len(l_states),2): for perm in q_perm: mycircuit = QuantumCircuit(used_qubits, used_qubits) mycircuit = new_cut.circuit_builder(mycircuit, perm, l_states[i_s],l_states[i_s+1]) circ_ori.append(mycircuit) # add measurement section to the circuit set newly created: nb_states = len(circ_ori) circ_ideal = copy.deepcopy(circ_ori) for i_state in range(nb_states): new_cut.add_barrier_and_measure(circ_ideal[i_state],qubit_list) ideal_dic = {} ###Output _____no_output_____ ###Markdown Obtain result distributions on noise free simulator You may skip this section and go to: "Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier" ###Code # execute on noise free simulator s_sim = 12000 job_simul = execute(circ_ideal, backend=simulator, shots=s_sim) tot_results_simul = job_simul.result() # establish a dictionary of count results on noise free simulator: # (this step is only useful if ram storage is performed) void_counts = dict(zip(label_list, np.zeros(2**used_qubits))) tot_results_sim_dic = {} for i_state in range(nb_states): counts_simul = copy.deepcopy(void_counts) counts_simul.update(tot_results_simul.get_counts(i_state)) ideal_dic[str(i_state)]=counts_simul ###Output _____no_output_____ ###Markdown markdown for securityjson_dic_dumper(ideal_dic,"ideal_dic_"+project_name) Example of circuit for separable state of the first type ($W\otimes\Phi^+\; or\; W\otimes\Psi^+$): ###Code i_state_test = 10 print(device_name, "circuit #",i_state_test) circ_ideal[i_state_test].draw(output='mpl') print(device_name, "circuit #",i_state_test) plot_histogram(ideal_dic[str(i_state_test)], legend=['noise free simulation'], color = "b", figsize=(10.,5.)) ###Output Ide circuit # 10 ###Markdown Example of circuit for separable state of the second type ($GHZ\otimes\Psi^+ \; or\; \bar{W}\otimes\Phi^+$): ###Code i_state_test = 10 print(device_name, "circuit #",i_state_test) 
circ_ideal[i_state_test].draw(output='mpl') print(device_name, "circuit #",i_state_test) plot_histogram(ideal_dic[str(i_state_test)], legend=['noise free simulation'], color = "b", figsize=(10.,5.)) ###Output Ide circuit # 10 ###Markdown Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier ###Code # try loading the dictionary of results if its creation was skipped if len(ideal_dic) == 0: ideal_dic = json_dic_loader("ideal_dic_"+project_name) nb_states = len(ideal_dic) nb_labels = len(list(ideal_dic.values())[0]) s_sim = sum(list(ideal_dic.values())[0].values()) PD_ideal = np.ndarray((nb_states,nb_labels)) for i_state in range(nb_states): PD_ideal[i_state, :] = list(ideal_dic[str(i_state)].values()) # now a little trick to get the ideal values from the simulator approximated values with np.errstate(divide='ignore'): # ignore the divide by zero warning PD_ideal = 1/np.round(s_sim/(PD_ideal)) # have a look at the matrix head and tail: print("first and last state probability distributions:") print(np.round(np.vstack((PD_ideal[0:1,:],PD_ideal[-1:,:])),4)) ###Output first and last state probability distributions: [[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.1667 0.1667 0. 0.1667 0. 0. 0. 0. 0.1667 0.1667 0. 0.1667 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. ] [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.1667 0. 0. 0.1667 0. 0. 0. 0. 0.1667 0. 0. 0.1667 0.1667 0. 0. 0.1667 0. 0. 0. 0. 
]] ###Markdown Monte Carlo simulation for the ideal device ###Code # here will be appended the data we want for the curve plot ideal_data_list=[] ###Output _____no_output_____ ###Markdown you may skip this cell and get stored curves by running the next cell ###Code # you may want to skip this cell as it will require a long time # because of the high number of trials required by the Monte Carlo simulation for each nb o shots value # the following values are defined in the study summary (readme file): trials=100 # to be set to 10000 if not demo window=5 # shorter window than for the real device counts epsilon = .001 min_shots = 5 max_shots = 100 pol=2 subset = None # variable not used here verbosality = 5 # printing step for intermediate results when increasing the experiment shot number PD_test = PD_ideal mitigation_dic = {"Na": None} o_metrics_desired = ['jensenshannon', 'sqeuclidean'] model_dic = {"ideal_sim": PD_ideal} for mit_str, mitigation in mitigation_dic.items(): if mitigation != None: # thus only for counts on real device PD_exper = get_clean_matrix(empirical_dic, mitigation=mitigation, m_filter=meas_filter) for o_metric in o_metrics_desired: for model_name in model_dic.keys(): add_single_dic(ideal_data_list) ###Output _____no_output_____ ###Markdown markdown for safetyjson_dic_dumper(ideal_data_list,"ideal_device_data_list_"+project_name) ###Code # get the stored results of the Monte Carlo simulation in case you skipped the previous step if len(ideal_data_list) == 0: ideal_data_list = json_dic_loader("ideal_device_data_list_"+project_name) # have a look at the mean error rate curves and error rate at save shot number n_s # NB the r_hat_mean curves and legend reported r_hat_max errors the unsmoothed values opl.plot_curves(ideal_data_list,np.array([0,1]), "Jensen-Shannon vs squared euclidean distance - $\epsilon=0.001$" , ["model"], ["device","metric"], right_xlimit = 20, bottom_ylimit = -0.001, top_ylimit = 0.05) ###Output _____no_output_____ ###Markdown 
Real device section ###Code from qiskit import IBMQ IBMQ.load_account() provider = IBMQ.get_provider(hub='ibm-q') provider.backends() project_device = 'ibmq_valencia'# you may choice here a different backend device_name = dev_dic[project_device] mitig_name = program_name + "_" + device_name print(mitig_name) #determine here the backend device = provider.get_backend(project_device) # the backend names are listed here above properties = device.properties() coupling_map = device.configuration().coupling_map ###Output _____no_output_____ ###Markdown obtain mitigation filter markdown for demonb_shots_cal = 8192 set here the number of shots for the calibration phaseprint("backend:", device.name(), "qubit_list:", qubit_list)job_cal = execute(meas_calibs, backend=device, shots=nb_shots_cal)print(job_cal.job_id())job_monitor(job_cal)time_exp = time.strftime('%d/%m/%Y %H:%M:%S')print("DMY: ",time_exp) markdown for demohere we save mitigation resultscal_results = job_cal.result()cal_results_dic = cal_results.to_dict()to make date in dictionary serializable if there is a 'date' key:if 'date' in cal_results_dic.keys(): cal_results_dic['date']=str(cal_results_dic['date']) markdown for demo and securitydumpjson_dic_dumper(cal_results_dic,"cal_results_dic_"+ mitig_name) ###Code # retrieve the corresponding measurement mitigation filter obtained at experimental time # use a fake job because use of the from_dict method simulator = Aer.get_backend('qasm_simulator') fake_job_cal = execute(meas_calibs, backend=simulator, shots=1) fake_cal_results = fake_job_cal.result() cal_results_dic = json_dic_loader("cal_results_dic_"+mitig_name) if 'date' in cal_results_dic.keys(): str(cal_results_dic['date']) cal_results = fake_cal_results.from_dict(cal_results_dic) meas_fitter = CompleteMeasFitter(cal_results, label_list, qubit_list=qubit_list, circlabel='mcal') meas_filter = meas_fitter.filter # have a look at the average measurement fidefily of this device: print("Average Measurement Fidelity 
was: %f" % meas_fitter.readout_fidelity(), "for",project_device) ###Output Average Measurement Fidelity was: 0.794262 for ibmq_valencia ###Markdown Transpile the basic circuits for running on real deviceIn this demo, these are not the circuits which were actually run on real devices (not the same transpiler seed).The optimization level is set to 2 instead of 3 in real experiments, for speed and also because at this moment there is a transpiler error occuring for ibmqx2: 'Maximum iteration reached. max_iteration=1000' ###Code id_gates = 0 str_nb_id = str(id_gates) zfilled = str_nb_id.zfill(4-len(str_nb_id)) project_name = mitig_name + "_" + unique_char + zfilled print(project_name) # transpile verbose = True summary_dic = {} seed_transpiler_list = list(range(nb_states)) real_circs = [] start_time = time.strftime('%d/%m/%Y %H:%M:%S') print("Start at DMY: ",start_time) for i_state in list(range(nb_states)): # prepare circuit to be transpiled circuit = copy.deepcopy(circ_ori[i_state]) if id_gates > 0: circuit.barrier() for id_gates_index in range(id_gates): for index, value in enumerate(qubit_list): circuit.id(value) new_cut.add_barrier_and_measure(circuit, qubit_list) summary = [] depth_list = [] Q_state_opt_new = transpile(circuit, backend=device, coupling_map = coupling_map, seed_transpiler=seed_transpiler_list[i_state], optimization_level=2, initial_layout=qubit_list) summary_dic[i_state] = {"depth": Q_state_opt_new.depth(), 'circuit':Q_state_opt_new} real_circs.append(Q_state_opt_new) if verbose: print("circuit %2i" % i_state,"length",summary_dic[i_state]["depth"], "DMY: ",time.strftime('%d/%m/%Y %H:%M:%S')) end_time = time.strftime('%d/%m/%Y %H:%M:%S') print("Completed at DMY: ",end_time) i_state_test = 10 print(project_device, "circuit #",i_state_test, "circuit length:",real_circs[i_state_test].depth()) #summary_dic[i_state_test]['depth']) # you may want to skip this if large nb of id gates before measurement real_circs[i_state_test].draw(output='mpl') #check a 
circuit on noise-free simulator job_simul = execute(real_circs[i_state_test], backend=simulator, shots=s_sim) print(project_device, "circuit #",i_state_test, "on noise free simulator") plot_histogram(job_simul.result().get_counts(), legend=['noise free simulation'], color = "b", figsize=(10.,5.)) ###Output ibmq_valencia circuit # 10 on noise free simulator ###Markdown run job markdown for demorun the circuitsnb_shots = 8192print("backend:", device.name(), "qubit_list:", qubit_list)time_exp = time.strftime('%d_%m_%Y_%H_%M_%S')print("DMY: ",time_exp) job_real = execute(real_circs, backend=device, optimization_level=0, shots=nb_shots)job_real_id = job_real.job_id()print("job id:", job_real_id)job_monitor(job_real)time_exp = time.strftime('%d_%m_%Y_%H_%M_%S')print("DMY: ",time_exp, "job id:", job_real_id)tot_results_real = job_real.result()empirical_dic ={}for i_state_count, state_count in enumerate(tot_results_real.get_counts()): empirical_dic[str(i_state_count)] = state_count markdown for safetyjson_dic_dumper(job_real_id,"job_real_id_"+ project_name) markdown for safety at demojson_dic_dumper(empirical_dic,"experimental_"+ project_name) markdown for demo2d JOB RUNnb_shots = 8192run the circuitsprint("backend:", device.name(), "qubit_list:", qubit_list)time_exp = time.strftime('%d_%m_%Y_%H_%M_%S')print("DMY: ",time_exp) job_test = execute(real_circs, backend=device, optimization_level=0, shots=nb_shots)job_test_id = job_test.job_id()print("job id:", job_test_id)job_monitor(job_test)time_exp = time.strftime('%d_%m_%Y_%H_%M_%S')print("DMY: ",time_exp, "job id:", job_test_id)tot_results_test = job_test.result()test_dic ={}for i_state_count, state_count in enumerate(tot_results_test.get_counts()): test_dic[str(i_state_count)] = state_count markdown for safety at demojson_dic_dumper(job_test_id,"job_test_id_"+ project_name)json_dic_dumper(test_dic,"test_"+ project_name) Load the transpiled circuits that were actually run legacy: valid only for the GHZ Psi+ / W Phi- 
combinationotherwise go instead to: "Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier" ###Code #changing keys of dictionary for merging: def key_change(ini_dict, i_subset): ini_list = [] len_ini = len(ini_dict) for i in range(len_ini): ini_list.append(str(i+i_subset*len_ini)) return dict(zip(ini_list, list(ini_dict.values()))) if program_name == "QAD": #retrieve the data corresponding to the 1st project lfc = list(Flag_char) circ_ideal =[] empirical_dic = {} for i_subset, subset in enumerate(lfc): qasm_circs_dic = json_dic_loader('qasm_circs_dic_QAD_'+device_name+'_'+ subset + zfilled) j=0 # j included for project with several transpilation sessions for each device - not used here qasm_circs = qasm_circs_dic[str(j)] nb_circs = len(qasm_circs) for i_circs in range(nb_circs): circ_ideal.append(QuantumCircuit().from_qasm_str(qasm_circs[i_circs])) empirical_dic = {**empirical_dic, **key_change(json_dic_loader("experimental"+"_"+mitig_name +"_"\ +subset+zfilled), i_subset)} test_dic = copy.deepcopy(empirical_dic) #nb_states = len(circ_ideal) ###Output _____no_output_____ ###Markdown Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier ###Code if program_name == "AL2": empirical_dic = json_dic_loader('experimental_'+project_name) test_dic = json_dic_loader('test_'+project_name) def rectify_counts(tot_res, test_cqi,mitigation,m_filter) : void_counts = dict(zip(label_list, np.zeros(2**used_qubits))) try: counts_results_real_test = tot_res[str(test_cqi)] except KeyError as error: counts_results_real_test = tot_res[test_cqi] raw_counts_test = copy.deepcopy(void_counts) raw_counts_test.update(counts_results_real_test) if mitigation: mitigated_results_test = meas_filter.apply(raw_counts_test, method = 'least_squares') returned_counts = copy.deepcopy(void_counts) returned_counts.update(mitigated_results_test) else: returned_counts = copy.deepcopy(raw_counts_test) return returned_counts 
###Output _____no_output_____ ###Markdown Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier ###Code def get_clean_matrix(dic, mitigation,m_filter): clean_matrix = np.ndarray((nb_states,nb_labels)) for i_state in range(nb_states): rectified_counts = rectify_counts(dic,i_state, mitigation,m_filter) # get a rectified counts dictionary clean_matrix[i_state, :] = list(rectified_counts.values()) clean_matrix = clean_matrix/clean_matrix.sum(axis=1, keepdims=True) return clean_matrix # We need to create a first matrix version. It will then vary for each considered set of distribution mitigation = False PD_exper = get_clean_matrix(empirical_dic, mitigation=mitigation, m_filter=meas_filter) print("first and last state probability distributions:") print(np.round(np.vstack((PD_exper[0:1,:],PD_exper[-1:,:])),3)) if program_name == "QAD": PD_test = copy.deepcopy(PD_exper) elif program_name == "AL2": mitigation = False PD_test = get_clean_matrix(test_dic, mitigation=mitigation, m_filter=meas_filter) print("first and last state probability distributions:") print(np.round(np.vstack((PD_test[0:1,:],PD_test[-1:,:])),3)) ###Output first and last state probability distributions: [[0.005 0.013 0.011 0. 0.016 0. 0. 0. 
0.032 0.094 0.078 0.002 0.103 0.004 0.005 0.006 0.048 0.138 0.116 0.004 0.16 0.006 0.007 0.007 0.016 0.039 0.032 0.001 0.048 0.002 0.002 0.003] [0.011 0.001 0.003 0.009 0.014 0.003 0.004 0.016 0.033 0.008 0.009 0.025 0.141 0.024 0.024 0.139 0.014 0.003 0.002 0.01 0.066 0.011 0.012 0.055 0.127 0.019 0.021 0.09 0.048 0.008 0.01 0.039]] ###Markdown Monte Carlo simulation for the real device ###Code # here will be appended the data we want for the final plot of this notebook empirical_data_list=[] ###Output _____no_output_____ ###Markdown you may want to skip this cell and get stored curves by running the next cell ###Code # you may want to skip this cell as it will require a long time # because of the high number of trials required by the Monte Carlo simulation for each nb o shots value # the following values are defined in the study summary notebook: trials=100 # should be 1000 if not demo window=11 epsilon = .001 max_shots = 500 pol=2 verbosality = 10 # printing step for intermediate results when increasing the experiment shot number # In this section you can easily make your choice of combinations: # mitigation or not, metric, model mitigation_dic = {"no":False, "yes" : True} #mitigation_dic = {"no":False} #mitigation_dic = {"yes" : True} o_metrics_desired = ['jensenshannon', 'sqeuclidean'] #o_metrics_desired = ['jensenshannon'] #o_metrics_desired = ['sqeuclidean'] model_dic = {"empirical": PD_exper, "ideal_sim": PD_ideal} #model_dic = {"empirical": PD_exper} #model_dic = {"ideal_sim": PD_ideal} # Obtain a sequence of results in form of a list of dictionaries for mit_str, mitigation in mitigation_dic.items(): # here we toggle PD_exper as we toggled mitigation status PD_exper = get_clean_matrix(empirical_dic, mitigation=mitigation, m_filter=meas_filter) PD_test = get_clean_matrix(test_dic, mitigation=mitigation, m_filter=meas_filter) for o_metric in o_metrics_desired: print(project_name, model_dic.keys(), o_metric) for model_name in model_dic.keys(): 
add_single_dic(empirical_data_list) ###Output _____no_output_____ ###Markdown markdown fo securityjson_dic_dumper(empirical_data_list,'Tnemp_data_list_'+project_name) ###Code # get the stored results of the Monte Carlo simulation in case you skipped the previous step if len(empirical_data_list) == 0: empirical_data_list = json_dic_loader('Nemp_data_list_'+project_name) # have a look at the mean error rate curves and error rate at save shot number n_s # NB the r_hat_mean curves and legend reported r_hat_max errors are the unsmoothed values opl.plot_curves(ideal_data_list + empirical_data_list, np.array(range(2+len(empirical_data_list))), "$\epsilon=0.001$" , ["device"], ["model","metric","mitigation","id_gates"], right_xlimit = 80, bottom_ylimit = -0.02, top_ylimit = 1) import winsound duration = 2000 # milliseconds freq = 800 # Hz winsound.Beep(freq, duration) import qiskit.tools.jupyter %qiskit_version_table ###Output _____no_output_____
examples/Jupyter_notebook_examples/model_to_scripts/model2scripts.ipynb
###Markdown 1. Define a model ###Code p = Process("process") p_con = p.Condition("Condition 1") p1 = p_con.Process("process 1") p2 = p_con.Process("process 2") p_act1 = p1.Action("Action 1") p_act2 = p2.Action("Action 2") p_act3 = p.Action("Action 3") show(p, width=400, height=400) ###Output _____no_output_____ ###Markdown 2 Generatre a script from themodel ###Code sg = ScriptGenerator() sg.run(p) print(sg.script) ###Output ID2014082256776 = Process('process') ID2014082256712 = ID2014082256776.Condition('Condition 1') ID2014082196360 = ID2014082256712.Process('process 1') ID2014083412872 = ID2014082196360.Action('Action 1') ID2014083412552 = ID2014082256712.Process('process 2') ID2014083413192 = ID2014083412552.Action('Action 2') ID2014083413448 = ID2014082256776.Action('Action 3')
jupyter-notebooks/ParameterSpace extraction from data.ipynb
###Markdown This notebook shows the functions provided by TINC to create parameter spaces and data pools from configuration and output files. There are two methods: * Using the output data * Using configuration files Using the output can be very convenient as you can create a DataPool automatically to explore the data, but there are cases where you might need to extract the parameter space from configuration files instead, in cases where the parameters are not present in the output data. ###Code from tinc import * %pylab inline ###Output Populating the interactive namespace from numpy and matplotlib ###Markdown tl;dr Create datapools from outputYou can create a data pool directly from results using ```create_datapool_from_output()```. This function returns the datapool and parameter space extracted from the data files. ###Code data_dir = r'C:\Users\Andres\source\repos\vdv_data\nbO_2chempot' dp,ps = create_datapool_from_output(data_dir, "results.json", ignore_params=['Beta'], debug = False) figure(figsize=(12,4)) subplot(1,2, 1) xlabel("T") ps.get_dimension("param_chem_pot(a)").value = 3.8 plot(ps.get_parameter("T").values, dp.get_slice("<formation_energy>", "T")) ps.get_parameter("param_chem_pot(a)").value = 3.9 plot(ps.get_parameter("T").values, dp.get_slice("<formation_energy>", "T")) legend([3.8, 3.9]) subplot(1,2, 2) xlabel("param_chem_pot(a)") temp_values = linspace(100, 2800, 5) for temp_val in temp_values: ps.get_dimension("T").value = temp_val plot(ps.get_parameter("param_chem_pot(a)").get_values(), dp.get_slice("<formation_energy>", "param_chem_pot(a)"), marker='o') legend(temp_values); ###Output Found more than one potential parameter in files:['Beta', 'T'] Using:T ###Markdown Extracting from configuration filesTo extract a parameter space from configuration files, you must provide a data root directory and the name of the configuration file that can be found in subdirectories. 
This assumes all configuration file names are the same.You will also need to describe how to extract the information from the configuration files. This is done by specifying the keys where the parameter data is found. For example if the configuration files look like:```json{ "driver" : { "mode" : "incremental", "motif" : { "configname" : "restricted_auto", "_configname" : "SCEL1_1_1_1_0_0_0/0", "_configdof" : "$HOME/laptop_share/NbO_rocksalt_gs/mc_runs/fit_13.02/coarse_grid/set2_cooling_grid2/A_3.9B_-19.1/conditions.298/tto/final_state.json" }, "initial_conditions" : { "param_chem_pot" : { "a" : 3.90, "b" : -19.80 }, "temperature" : 20.0, "tolerance" : 0.001 }, "final_conditions" : { "param_chem_pot" : { "a" : 3.90, "b" : -19.80 }, "temperature" :2800.0, "tolerance" : 0.001 }, "incremental_conditions" : { "param_chem_pot" : { "a" : 0.0, "b" : 0.0 }, "temperature" : 10.0, "tolerance" : 0.001 } }}```You specify the starting value key as: ```driver/initial_conditions/*``` because the starting values are a list within the "driver" and " initial_conditions" keys. A similar string needs to be constructed for end and increment keys.Current limitations: * JSON only * space must be described by its boundaries and the incremement * Limited format to describe how to extract the information. Currently values must be leaf nodes. Extracting parameter space valuesThe function ```extract_parameter_space_data``` will extract the parameter values as a dictionary. This can be useful as a initial step to ensure values are being extracted correctly. 
###Code data_dir = r'C:\Users\Andres\source\repos\vdv_data\nbO_2chempot' config_file = 'mc_settings.json' parameter_start_key = 'driver/initial_conditions/*' parameter_end_key = 'driver/final_conditions/*' parameter_increment_key = 'driver/incremental_conditions/*' extract_parameter_space_data(data_dir, config_file, parameter_start_key, parameter_end_key, parameter_increment_key) ###Output _____no_output_____ ###Markdown Creating parameter spacesThe ```make_parameter_space``` function returns a fully created parameter space from the configuration files. The only remaining step to perform to make the parameter space usable is to set the path template using ```set_current_path_template```. This template describes how the parameter values map to the filesystem. ###Code data_dir = r'C:\Users\Andres\source\repos\vdv_data\nbO_2chempot' config_file = 'mc_settings.json' parameter_start_key = 'driver/initial_conditions/*' parameter_end_key = 'driver/final_conditions/*' parameter_increment_key = 'driver/incremental_conditions/*' ps = make_parameter_space(data_dir, config_file, parameter_start_key, parameter_end_key, parameter_increment_key, ps_name="casmParams") ps.set_current_path_template("A_%%param_chem_pot(a)%%B_%%param_chem_pot(b)%%") ps.print() ###Output _____no_output_____ ###Markdown Data pools for the new parameter spaceOnce the parameter space has been extracted, a DataPool can be created to access the data across all the directories. After creating the data pool, you need to register the files that contain the data, in this case, the "results.json" file spans the temperature parameter. These data files must be located in the path defined through ```set_current_path_template()``` above. 
###Code dp = DataPoolJson("results", ps, "slice_dir") dp.register_data_file("results.json", "temperature") dp.get_current_files() ps.get_current_relative_path() ###Output _____no_output_____ ###Markdown You can query the fields available in the data files: ###Code dp.list_fields() ps.get_dimension("temperature").value = 1500 ps.get_dimension("param_chem_pot(b)").value = -19.9 ###Output _____no_output_____ ###Markdown You can request slices of data, from a single data file (temperature is the parameter contained in the individual files): ###Code #dp.debug = True ps.get_dimension("param_chem_pot(a)").value = 3.8 plot(ps.get_parameter("temperature").values, dp.get_slice("<formation_energy>", "temperature")) ps.get_parameter("param_chem_pot(a)").value = 3.9 plot(ps.get_parameter("temperature").values, dp.get_slice("<formation_energy>", "temperature")) legend(ps.get_dimension("param_chem_pot(a)").values) len(dp.get_slice("<formation_energy>", "temperature")), len(ps.get_parameter('temperature').values) ###Output _____no_output_____ ###Markdown Or you can request slices that take a single value from a number of results file, when the requested slicing dimension is a dimension that affects the current path: ###Code ps.get_parameter("param_chem_pot(a)").values dp.get_slice("<formation_energy>", "param_chem_pot(a)") ps._path_template temp_values = linspace(100, 2800, 10) for temp_val in temp_values: ps.get_dimension("temperature").value = temp_val plot(ps.get_parameter("param_chem_pot(a)").values, dp.get_slice("<formation_energy>", "param_chem_pot(a)"), marker='o') legend(temp_values); len(dp.get_slice("<formation_energy>", "param_chem_pot(a)")), len(ps.get_parameter('param_chem_pot(a)').values) ###Output _____no_output_____ ###Markdown Making parameter space from output filesYou can also extract parameter spaces from output files, to analyze output data, or when the input parameters are available in output data files.To do this, you must provide a root path and define a 
function that reads the a data file and returns a dictionary with the possible values each parameter can take according to the data file. The format should be the same provided by the ```extract_parameter_space_data``` function above. ###Code def read_func(path): with open(path) as f: j = json.load(f) return j data_dir = r'C:\Users\Andres\source\repos\vdv_data\nbO_2chempot' ps = extract_parameter_space_from_output(data_dir, "results.json", read_func) ps.print() ###Output _____no_output_____ ###Markdown If there are two potential parameters inside the result files, the first one found will be used. You can instruct which ones to ignore using the ```ignore_params=``` argument: ###Code ps = extract_parameter_space_from_output(data_dir, "results.json", read_func, ignore_params=['Beta']) ps.print() ###Output _____no_output_____ ###Markdown Creating Datapools from output You can create a data pool directly from results using ```create_datapool_from_output()```. This function outputs the datapool and parameter space extracted from the data files. 
###Code dp,ps = create_datapool_from_output(data_dir, "results.json", read_func, ignore_params=['Beta']) ps.get_parameter("param_chem_pot(b)").value, ps.get_parameter("param_chem_pot(b)").values, ps.get_parameter("param_chem_pot(b)").ids, ps.get_parameter("param_chem_pot(b)").get_space_stride() dp.list_fields() ps = dp.get_parameter_space() ps.get_current_relative_path() ps.get_root_path() #dp.debug = True plot(ps.get_parameter("T").get_values(), dp.get_slice("<formation_energy>", "T")); ps.get_parameter("param_chem_pot(a)").ids ps.get_current_relative_path() ps.is_filesystem_dimension("param_chem_pot(a)") ps.get_parameter("param_chem_pot(a)").values ps.get_common_id([ps.get_parameter("param_chem_pot(a)"),ps.get_parameter("param_chem_pot(b)")], {'T': 0, 'param_chem_pot(a)': 0, 'param_chem_pot(b)': 0}) ps.is_filesystem_dimension('param_chem_pot(a)') ps.resolve_template(ps._path_template, {'T': 0, 'param_chem_pot(a)': 3, 'param_chem_pot(b)': 0}) dp.get_slice("Beta", "param_chem_pot(a)") set(ps.get_parameter("param_chem_pot(a)").values) plot(ps.get_parameter("param_chem_pot(a)").get_values(), dp.get_slice("Beta", "param_chem_pot(a)"), marker='o') ###Output _____no_output_____ ###Markdown TODO... Ignore below... 
###Code import netCDF4 ps_filename = "parameter_space.nc" sub_dir = sub_dirs[0] full_path = data_dir + subdir + ps_filename ps_file = netCDF4.Dataset(full_path, "w", format="NETCDF4") params = ps_file.createGroup("internal_parameters") mapped_params = ps_file.createGroup("mapped_parameters") index_params = ps_file.createGroup("index_parameters") for param_name, space in param_space.items(): param_group = rootgrp.createVariable("values","f8",("internal_parameters",)) mapped_group = rootgrp.createVariable("values","f8",("mapped_parameters",)) mapped_var_ids = rootgrp.createVariable("ids","s",("mapped_parameters",)) index_group = rootgrp.createVariable("values","f8",("index_parameters",)) param_var = rootgrp.createVariable("values","f8",("internal_parameters",)) mapped_var = rootgrp.createVariable("values","f8",("mapped_parameters",)) mapped_var_ids = rootgrp.createVariable("ids","s",("mapped_parameters",)) index_params = rootgrp.createVariable("values","f8",("index_parameters",)) ps_file.close() ###Output _____no_output_____ ###Markdown Testing single filesystem parameter: ###Code data_dir = r'C:\Users\Andres\source\repos\vdv_data\MonteCarlo_0' dp,ps = create_datapool_from_output(data_dir, "results.json", ignore_params=["Beta"] ) ps = dp.get_parameter_space() ps.print() ps._path_template ps.get_current_relative_path() dp.list_fields() dp.get_slice("<formation_energy>", "T") dp.get_slice("<formation_energy>", "param_chem_pot(a)") ps.is_filesystem_dimension("T") ###Output _____no_output_____
examples/reference/widgets/RangeSlider.ipynb
###Markdown The ``RangeSlider`` widget allows selecting a floating-point range using a slider with two handles.For more information about listening to widget events and laying out widgets refer to the [widgets user guide](../../user_guide/Widgets.ipynb). Alternatively you can learn how to build GUIs by declaring parameters independently of any specific widgets in the [param user guide](../../user_guide/Param.ipynb). To express interactivity entirely using Javascript without the need for a Python server take a look at the [links user guide](../../user_guide/Param.ipynb). Parameters:For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb). Core* **``start``** (float): The range's lower bound* **``end``** (float): The range's upper bound* **``step``** (float): The interval between values* **``value``** (tuple): Tuple of upper and lower bounds of selected range* **``value_throttled``** (tuple): Tuple of upper and lower bounds of selected range where events are throttled by `callback_throttle` value. 
Display* **``bar_color``** (color): Color of the slider bar as a hexadecimal RGB value* **``callback_policy``** (str, **DEPRECATED**): Policy to determine when slider events are triggered (one of 'continuous', 'throttle', 'mouseup')* **``callback_throttle``** (int, **DEPRECATED**): Number of milliseconds to pause between callback calls as the slider is moved* **``direction``** (str): Whether the slider should go from left to right ('ltr') or right to left ('rtl')* **``disabled``** (boolean): Whether the widget is editable* **``name``** (str): The title of the widget* **``orientation``** (str): Whether the slider should be displayed in a 'horizontal' or 'vertical' orientation.* **``tooltips``** (boolean): Whether to display tooltips on the slider handle___ ###Code range_slider = pn.widgets.RangeSlider( name='Range Slider', start=0, end=math.pi, value=(math.pi/4., math.pi/2.), step=0.01) range_slider ###Output _____no_output_____ ###Markdown ``RangeSlider.value`` returns a tuple of float values that can be read out and set like other widgets: ###Code range_slider.value ###Output _____no_output_____ ###Markdown ControlsThe `RangeSlider` widget exposes a number of options which can be changed from both Python and Javascript. Try out the effect of these parameters interactively: ###Code pn.Row(range_slider.controls(jslink=True), range_slider) ###Output _____no_output_____ ###Markdown The ``RangeSlider`` widget allows selecting a floating-point range using a slider with two handles.For more information about listening to widget events and laying out widgets refer to the [widgets user guide](../../user_guide/Widgets.ipynb). Alternatively you can learn how to build GUIs by declaring parameters independently of any specific widgets in the [param user guide](../../user_guide/Param.ipynb). To express interactivity entirely using Javascript without the need for a Python server take a look at the [links user guide](../../user_guide/Param.ipynb). 
Parameters:For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb). Core* **``start``** (float): The range's lower bound* **``end``** (float): The range's upper bound* **``step``** (float): The interval between values* **``value``** (tuple): Tuple of upper and lower bounds of selected range* **``value_throttled``** (tuple): Tuple of upper and lower bounds of selected range throttled until mouseup Display* **``bar_color``** (color): Color of the slider bar as a hexadecimal RGB value* **``direction``** (str): Whether the slider should go from left to right ('ltr') or right to left ('rtl')* **``disabled``** (boolean): Whether the widget is editable* **``format``** (str, bokeh.models.TickFormatter): Formatter to apply to the slider value* **``name``** (str): The title of the widget* **``orientation``** (str): Whether the slider should be displayed in a 'horizontal' or 'vertical' orientation.* **``tooltips``** (boolean): Whether to display tooltips on the slider handle___ ###Code range_slider = pn.widgets.RangeSlider( name='Range Slider', start=0, end=math.pi, value=(math.pi/4., math.pi/2.), step=0.01) range_slider ###Output _____no_output_____ ###Markdown ``RangeSlider.value`` returns a tuple of float values that can be read out and set like other widgets: ###Code range_slider.value ###Output _____no_output_____ ###Markdown A custom format string or bokeh TickFormatter may be used to format the slider values: ###Code from bokeh.models.formatters import PrintfTickFormatter str_format = pn.widgets.RangeSlider(name='Distance', format='0.0a', start=100000, end=1000000) tick_format = pn.widgets.RangeSlider(name='Distance', format=PrintfTickFormatter(format='%.3f m')) pn.Column(str_format, tick_format) ###Output _____no_output_____ ###Markdown ControlsThe `RangeSlider` widget exposes a number of options which can be changed from both Python and Javascript. 
Try out the effect of these parameters interactively: ###Code pn.Row(range_slider.controls(jslink=True), range_slider) ###Output _____no_output_____ ###Markdown The ``RangeSlider`` widget allows selecting a floating-point range using a slider with two handles.For more information about listening to widget events and laying out widgets refer to the [widgets user guide](../../user_guide/Widgets.ipynb). Alternatively you can learn how to build GUIs by declaring parameters independently of any specific widgets in the [param user guide](../../user_guide/Param.ipynb). To express interactivity entirely using Javascript without the need for a Python server take a look at the [links user guide](../../user_guide/Param.ipynb). Parameters:For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb). Core* **``start``** (float): The range's lower bound* **``end``** (float): The range's upper bound* **``step``** (float): The interval between values* **``value``** (tuple): Tuple of upper and lower bounds of selected range* **``value_throttled``** (tuple): Tuple of upper and lower bounds of selected range where events are throttled by `callback_throttle` value. 
Display* **``bar_color``** (color): Color of the slider bar as a hexadecimal RGB value* **``direction``** (str): Whether the slider should go from left to right ('ltr') or right to left ('rtl')* **``disabled``** (boolean): Whether the widget is editable* **``format``** (str, bokeh.models.TickFormatter): Formatter to apply to the slider value* **``name``** (str): The title of the widget* **``orientation``** (str): Whether the slider should be displayed in a 'horizontal' or 'vertical' orientation.* **``tooltips``** (boolean): Whether to display tooltips on the slider handle___ ###Code range_slider = pn.widgets.RangeSlider( name='Range Slider', start=0, end=math.pi, value=(math.pi/4., math.pi/2.), step=0.01) range_slider ###Output _____no_output_____ ###Markdown ``RangeSlider.value`` returns a tuple of float values that can be read out and set like other widgets: ###Code range_slider.value ###Output _____no_output_____ ###Markdown A custom format string or bokeh TickFormatter may be used to format the slider values: ###Code from bokeh.models.formatters import PrintfTickFormatter str_format = pn.widgets.RangeSlider(name='Distance', format='0.0a', start=100000, end=1000000) tick_format = pn.widgets.RangeSlider(name='Distance', format=PrintfTickFormatter(format='%.3f m')) pn.Column(str_format, tick_format) ###Output _____no_output_____ ###Markdown ControlsThe `RangeSlider` widget exposes a number of options which can be changed from both Python and Javascript. Try out the effect of these parameters interactively: ###Code pn.Row(range_slider.controls(jslink=True), range_slider) ###Output _____no_output_____ ###Markdown The ``RangeSlider`` widget allows selecting a floating-point range using a slider with two handles.For more information about listening to widget events and laying out widgets refer to the [widgets user guide](../../user_guide/Widgets.ipynb). 
Alternatively you can learn how to build GUIs by declaring parameters independently of any specific widgets in the [param user guide](../../user_guide/Param.ipynb). To express interactivity entirely using Javascript without the need for a Python server take a look at the [links user guide](../../user_guide/Param.ipynb). Parameters:For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb). Core* **``start``** (float): The range's lower bound* **``end``** (float): The range's upper bound* **``step``** (float): The interval between values* **``value``** (tuple): Tuple of upper and lower bounds of selected range Display* **``bar_color``** (color): Color of the slider bar as a hexadecimal RGB value* **``callback_policy``** (str): Policy to determine when slider events are triggered (one of 'continuous', 'throttle', 'mouseup')* **``callback_throttle``** (int): Number of milliseconds to pause between callback calls as the slider is moved* **``direction``** (str): Whether the slider should go from left to right ('ltr') or right to left ('rtl')* **``disabled``** (boolean): Whether the widget is editable* **``name``** (str): The title of the widget* **``orientation``** (str): Whether the slider should be displayed in a 'horizontal' or 'vertical' orientation.* **``tooltips``** (boolean): Whether to display tooltips on the slider handle___ ###Code range_slider = pn.widgets.RangeSlider( name='Range Slider', start=0, end=math.pi, value=(math.pi/4., math.pi/2.), step=0.01) range_slider ###Output _____no_output_____ ###Markdown ``RangeSlider.value`` returns a tuple of float values that can be read out and set like other widgets: ###Code range_slider.value ###Output _____no_output_____ ###Markdown The ``RangeSlider`` widget allows selecting a floating-point range using a slider with two handles.For more information about listening to widget events and laying out widgets refer to the [widgets user 
guide](../../user_guide/Widgets.ipynb). Alternatively you can learn how to build GUIs by declaring parameters independently of any specific widgets in the [param user guide](../../user_guide/Param.ipynb). To express interactivity entirely using Javascript without the need for a Python server take a look at the [links user guide](../../user_guide/Param.ipynb). Parameters:For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb). Core* **``start``** (float): The range's lower bound* **``end``** (float): The range's upper bound* **``step``** (float): The interval between values* **``value``** (tuple): Tuple of upper and lower bounds of selected range* **``value_throttled``** (tuple): Tuple of upper and lower bounds of selected range where events are throttled by `callback_throttle` value. Display* **``bar_color``** (color): Color of the slider bar as a hexadecimal RGB value* **``callback_policy``** (str, **DEPRECATED**): Policy to determine when slider events are triggered (one of 'continuous', 'throttle', 'mouseup')* **``callback_throttle``** (int): Number of milliseconds to pause between callback calls as the slider is moved* **``direction``** (str): Whether the slider should go from left to right ('ltr') or right to left ('rtl')* **``disabled``** (boolean): Whether the widget is editable* **``name``** (str): The title of the widget* **``orientation``** (str): Whether the slider should be displayed in a 'horizontal' or 'vertical' orientation.* **``tooltips``** (boolean): Whether to display tooltips on the slider handle___ ###Code range_slider = pn.widgets.RangeSlider( name='Range Slider', start=0, end=math.pi, value=(math.pi/4., math.pi/2.), step=0.01) range_slider ###Output _____no_output_____ ###Markdown ``RangeSlider.value`` returns a tuple of float values that can be read out and set like other widgets: ###Code range_slider.value ###Output _____no_output_____
recurrent-neural-network-lstm.ipynb
###Markdown Import Libraries ###Code import numpy as np import pandas as pd import tensorflow as tf from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Activation, Dense, Dropout, LSTM from sklearn.metrics import mean_absolute_error from tensorflow.keras import layers from datetime import datetime import matplotlib.pyplot as plt import warnings warnings.filterwarnings('ignore', category=DeprecationWarning) warnings.filterwarnings('ignore', category=FutureWarning) ###Output _____no_output_____ ###Markdown Import data ###Code crypto_df = pd.read_csv("../input/g-research-crypto-forecasting/train.csv") crypto_df.head() asset_details = pd.read_csv('../input/g-research-crypto-forecasting/asset_details.csv') asset_details # Select Asset_ID = 6 for Ethereum crypto_df = crypto_df[crypto_df["Asset_ID"]==6] crypto_df.info(show_counts =True) ###Output <class 'pandas.core.frame.DataFrame'> Int64Index: 1956200 entries, 5 to 24236799 Data columns (total 10 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 timestamp 1956200 non-null int64 1 Asset_ID 1956200 non-null int64 2 Count 1956200 non-null float64 3 Open 1956200 non-null float64 4 High 1956200 non-null float64 5 Low 1956200 non-null float64 6 Close 1956200 non-null float64 7 Volume 1956200 non-null float64 8 VWAP 1956200 non-null float64 9 Target 1955860 non-null float64 dtypes: float64(8), int64(2) memory usage: 164.2 MB ###Markdown Preprocess data ###Code df = crypto_df.copy() # fill missing values df = df.reindex(range(df.index[0],df.index[-1]+60,60),method='pad') df = df.fillna(0) # rename column timestamp to Date df.rename({'timestamp': 'Date'}, axis=1, inplace=True) # rename Close to Price df.rename(columns={'Close': 'Price'}, inplace=True) # timestamp conversion df.Date = df.Date.apply(lambda d: datetime.fromtimestamp(int(d)).strftime('%Y-%m-%d')) # set index df.set_index('Date', inplace=True) df.head() # Convert to date array timesteps = df.index.to_numpy() 
prices = df['Price'].to_numpy() timesteps[:10], prices[:10] ###Output _____no_output_____ ###Markdown Modeling: Recurrent Neural Network LSTM ###Code HORIZON = 1 WINDOW_SIZE = 7 # Function to create labelled window data def get_labelled_windows(x, horizon=1): return x[:, :-horizon], x[:, -horizon:] # Test the window labelling function test_window, test_label = get_labelled_windows(tf.expand_dims(tf.range(8)+1, axis=0), horizon=HORIZON) print(f"Window: {tf.squeeze(test_window).numpy()} -> Label: {tf.squeeze(test_label).numpy()}") # Function to view NumPy arrays as windows def make_windows(x, window_size=7, horizon=1): window_step = np.expand_dims(np.arange(window_size+horizon), axis=0) window_indexes = window_step + np.expand_dims(np.arange(len(x)-(window_size+horizon-1)), axis=0).T windowed_array = x[window_indexes] windows, labels = get_labelled_windows(windowed_array, horizon=horizon) return windows, labels full_windows, full_labels = make_windows(prices, window_size=WINDOW_SIZE, horizon=HORIZON) len(full_windows), len(full_labels) # View the first 3 windows/labels for i in range(3): print(f"Window: {full_windows[i]} -> Label: {full_labels[i]}") # View the last 3 windows/labels for i in range(3): print(f"Window: {full_windows[i-3]} -> Label: {full_labels[i-3]}") # Function to create train-test-splits def make_train_test_splits(windows, labels, test_split=0.2): split_size = int(len(windows) * (1-test_split)) train_windows = windows[:split_size] train_labels = labels[:split_size] test_windows = windows[split_size:] test_labels = labels[split_size:] return train_windows, test_windows, train_labels, test_labels train_windows, test_windows, train_labels, test_labels = make_train_test_splits(full_windows, full_labels) len(train_windows), len(test_windows), len(train_labels), len(test_labels) train_windows[:5], train_labels[:5] import os # Function to implement a ModelCheckpoint callback with a specific filename def create_model_checkpoint(model_name, 
save_path="model_experiments"): return tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(save_path, model_name), verbose=0, save_best_only=True) tf.random.set_seed(42) # LSTM model with the Functional API inputs = layers.Input(shape=(WINDOW_SIZE)) x = layers.Lambda(lambda x: tf.expand_dims(x, axis=1))(inputs) x = layers.LSTM(128, activation="relu")(x) output = layers.Dense(HORIZON)(x) lstm_model = tf.keras.Model(inputs=inputs, outputs=output, name="model_5_lstm") # Compile model lstm_model.compile(loss="mae", optimizer=tf.keras.optimizers.Adam()) # Fit the model lstm_model.fit(train_windows, train_labels, epochs=100, verbose=0, batch_size=128, validation_data=(test_windows, test_labels), callbacks=[create_model_checkpoint(model_name=lstm_model.name)]) # Load in best version of the LSTM model lstm_model = tf.keras.models.load_model("model_experiments/model_5_lstm/") lstm_model.evaluate(test_windows, test_labels) def make_preds(model, input_data): forecast = model.predict(input_data) return tf.squeeze(forecast) # Make predictions with our LSTM model model_lstm_preds = make_preds(lstm_model, test_windows) model_lstm_preds[:10] ###Output _____no_output_____ ###Markdown Model Evaluation ###Code def evaluate_preds(y_true, y_pred): # Make sure float32 (for metric calculations) y_true = tf.cast(y_true, dtype=tf.float32) y_pred = tf.cast(y_pred, dtype=tf.float32) # Calculate various metrics mae = tf.keras.metrics.mean_absolute_error(y_true, y_pred) mse = tf.keras.metrics.mean_squared_error(y_true, y_pred) rmse = tf.sqrt(mse) mape = tf.keras.metrics.mean_absolute_percentage_error(y_true, y_pred) return {"mae": mae.numpy(), "mse": mse.numpy(), "rmse": rmse.numpy(), "mape": mape.numpy()} # Evaluate LSTM model model_lstm_results = evaluate_preds(y_true=tf.squeeze(test_labels), y_pred=model_lstm_preds) model_lstm_results ###Output _____no_output_____
4a_GenerateRASA_Conversational_Data.ipynb
###Markdown Script- **Input:** FAQ generated in 2b and listing entities generated in 2d.- **Output:** auto-generated nlu.yml for RASA solution. - Approx 300K intents ###Code
# Project paths: RASA data folder, preprocessed Airbnb data, and test output folder.
root = './RASA_ConceptNet5/data/'
processed = './Data/processing/Processed_Airbnb/'
test = './Data/test'
###Output _____no_output_____ ###Markdown Import ###Code
import pandas as pd
###Output _____no_output_____ ###Markdown Generate nlu.yml Gather FAQs ###Code
# Load the three generated FAQ files and stack them row-wise into one frame.
# NOTE(review): the files carry a .json extension but are parsed as CSV —
# presumably the generator wrote CSV content; verify against step 2b.
FAQ1 = pd.read_csv(processed+'FAQ_1.json')
FAQ2 = pd.read_csv(processed+'FAQ_2.json')
FAQ3 = pd.read_csv(processed+'FAQ_3.json')
FAQ = pd.concat([FAQ1, FAQ2, FAQ3], axis=0)
FAQ.head(1)
###Output _____no_output_____ ###Markdown Add intent type ###Code
# Tag every FAQ row with the intent name and record type used by nlu.yml.
FAQ['intent'] = 'affirm_FAQ'
FAQ['type'] = 'intent'
FAQ.head(5)
###Output _____no_output_____ ###Markdown Append entity and entity value to the intent. ###Code
#Remove ? from the question
#Append entity value and entity at the end and append '?' => [answer](wordid)?
# BUG FIX: '?' is a regex metacharacter, so str.replace(r'?', ' ', regex=True)
# raises re.error ("nothing to repeat"). Replace the literal character instead.
FAQ['question'] = FAQ['question'].str.replace('?', ' ', regex=False)
FAQ.head(5)
# FAQ['question'] = FAQ['question'].astype(str) +'['+FAQ['answer'].astype(str)+'](wordid)?' 
FAQ['question'] = FAQ['question'].astype(str) FAQ.head(5) ###Output _____no_output_____ ###Markdown Auto-generate NLU intents ###Code nlus = FAQ.groupby('intent')['question'].apply(list).to_dict() answer = FAQ["answer"].unique() nlu=[] query=[] entity=[] for val in answer: questionString1="find me close matches for a place near " questionString2="get me recommendations with features close to" questionString3="I am looking for a place with facilities similar to " questionString4="get me recommendations with facilities similar to " questionString5="find similar recommendations like a " questionString6="find places with amenities similar to " questionString7="find places suitable for " n = val nlu.append(questionString1+"["+str(n)+"](wordid)") nlu.append(questionString2+"["+str(n)+"](wordid)") nlu.append(questionString3+"["+str(n)+"](wordid)") nlu.append(questionString4+"["+str(n)+"](wordid)") nlu.append(questionString5+"["+str(n)+"](wordid)") nlu.append(questionString6+"["+str(n)+"](wordid)") query.append(questionString1+" "+str(n)) query.append(questionString2+" "+str(n)) query.append(questionString3+" "+str(n)) query.append(questionString4+" "+str(n)) query.append(questionString5+" "+str(n)) query.append(questionString6+" "+str(n)) query.append(questionString7+" "+str(n)) entity.append(str(n)) entity.append(str(n)) entity.append(str(n)) entity.append(str(n)) entity.append(str(n)) entity.append(str(n)) entity.append(str(n)) # #;TYPE;INTENT;VALUE # f = open(root+"nlu.yml","a") # for intent in nlus: # f.write("\n- intent: {}\n".format(intent)) # f.write(" examples: |\n") # for value in nlus[intent]: # f.write(" - {}\n".format(value)) # f.write("\n- intent: {}\n".format("affirm_neo4j_conceptNet5_review")) # f.write(" examples: |\n") # for value in nlu: # f.write(" - {}\n".format(value.lower())) # f.close() ###Output _____no_output_____ ###Markdown Generate listings nlu ###Code import csv listing_reviews = pd.read_csv(processed+'listings_text_processed.csv') 
listing_reviews = listing_reviews[['neighbourhood','neighbourhood_cleansed','property_type' ,'room_type','accommodates','bathrooms','bathrooms_text' ,'bedrooms','beds','amenities','price']] neighborhood = listing_reviews['neighbourhood'].unique() loc = listing_reviews['neighbourhood'].unique() accomodates = listing_reviews['accommodates'].unique() bathrooms = listing_reviews['bathrooms'].unique() bedrooms = listing_reviews['bedrooms'].unique() beds = listing_reviews['beds'].unique() price = listing_reviews['price'].unique() bathrooms_text = listing_reviews['bathrooms_text'].unique() property_type = listing_reviews['property_type'].unique() room_type = listing_reviews['room_type'].unique() amenities = listing_reviews['amenities'].unique() nlu = [] for val in loc: questionString1="find me a place near " questionString2="get me recommendations with " questionString3="I am looking for a place with facilities like " questionString4="get me recommendations with facilities to " if ',' in val: neigh = val.split(',') for n in neigh: nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid)") nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid)") nlu.append(questionString3+"["+n.strip('\"').strip()+"](wordid)") nlu.append(questionString4+"["+n.strip('\"').strip()+"](wordid)") else: n = val nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid)") nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid)") nlu.append(questionString3+"["+n.strip('\"').strip()+"](wordid)") nlu.append(questionString4+"["+n.strip('\"').strip()+"](wordid)") query.append(questionString1+" "+str(n)) query.append(questionString2+" "+str(n)) query.append(questionString3+" "+str(n)) query.append(questionString4+" "+str(n)) entity.append(str(n)) entity.append(str(n)) entity.append(str(n)) entity.append(str(n)) for val in neighborhood: questionString1="find me a place in the neighborhood of " questionString2="find me a place in " if ',' in val: neigh = val.split(',') for n 
in neigh: nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid)") nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid)") else: n = val nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid)") nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid)") query.append(questionString1+" "+str(n)) query.append(questionString2+" "+str(n)) entity.append(str(n)) entity.append(str(n)) for val in accomodates: questionString1="find me a place that can accomodate " questionString2="find me a place for " n = str(val) nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid)") nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid)") query.append(questionString1+" "+str(n)) query.append(questionString2+" "+str(n)) entity.append(str(n)) entity.append(str(n)) for val in bathrooms: questionString1="find me a place with " questionString2="looking for a place with " n = str(val) nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid) bathrooms") nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid) bathrooms") query.append(questionString1+" "+str(n)) query.append(questionString2+" "+str(n)) entity.append(str(n)) entity.append(str(n)) for val in bedrooms: questionString1="find me a place with " questionString2="looking for a place with " n = str(val) nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid) bedrooms") nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid) bedrooms") query.append(questionString1+" "+str(n)) query.append(questionString2+" "+str(n)) entity.append(str(n)) entity.append(str(n)) for val in beds: questionString1="find me a place with " questionString2="looking for a place with " n = str(val) nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid) beds") nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid) beds") query.append(questionString1+" "+str(n)) query.append(questionString2+" "+str(n)) entity.append(str(n)) entity.append(str(n)) for val in price: 
questionString1="find me a place in the range of " questionString2="looking for a place within the price " n = val nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid)") nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid)") query.append(questionString1+" "+str(n)) query.append(questionString2+" "+str(n)) entity.append(str(n)) entity.append(str(n)) for val in bathrooms_text: questionString1="find me a place with bath that is " questionString2="looking for a place with bath features like " n = str(val) nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid)") nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid)") query.append(questionString1+" "+str(n)) query.append(questionString2+" "+str(n)) entity.append(str(n)) entity.append(str(n)) for val in amenities: questionString1="find me a place with amenities like " questionString2="I am looking for a place with amenities like " val = val.strip('[').strip(']') if ',' in val: neigh = val.split(',') for n in neigh: nlu.append(questionString1+"["+n.replace('"', '').strip()+"](wordid)") nlu.append(questionString2+"["+n.replace('"', '').strip()+"](wordid)") else: n = val nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid)") nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid)") query.append(questionString1+" "+str(n)) query.append(questionString2+" "+str(n)) entity.append(str(n)) entity.append(str(n)) for val in property_type: questionString1="find me a " questionString2="looking for a " if ',' in val: neigh = val.split(',') for n in neigh: nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid) property") nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid) property") else: n = val nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid) property") nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid) property") query.append(questionString1+" "+str(n)) query.append(questionString2+" "+str(n)) entity.append(str(n)) 
entity.append(str(n)) for val in room_type: questionString1="find me a " questionString2="looking for a" if ',' in val: neigh = val.split(',') for n in neigh: nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid) room") nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid) room") else: n = val nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid) room") nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid) room") query.append(questionString1+" "+str(n)) query.append(questionString2+" "+str(n)) entity.append(str(n)) entity.append(str(n)) # #;TYPE;INTENT;VALUE # f = open(root+"nlu.yml","a") # f.write("\n- intent: {}\n".format("affirm_neo4j_conceptNet5_listing")) # f.write(" examples: |\n") # for value in nlu: # f.write(" - {}\n".format(value.lower())) # f.close() df = pd.DataFrame(list(zip(query, entity)), columns =['user_query', 'entity']) df df.to_csv(test+'/test_user_queries.csv') ###Output _____no_output_____
examples/double_dice.ipynb
###Markdown The double dice problemThis notebook demonstrates a way of doing simple Bayesian updates using the table method, with a Pandas DataFrame as the table.Copyright 2018 Allen DowneyMIT License: https://opensource.org/licenses/MIT ###Code # Configure Jupyter so figures appear in the notebook %matplotlib inline # Configure Jupyter to display the assigned value after an assignment %config InteractiveShell.ast_node_interactivity='last_expr_or_assign' import numpy as np import pandas as pd from fractions import Fraction ###Output _____no_output_____ ###Markdown The BayesTable classHere's the class that represents a Bayesian table. ###Code class BayesTable(pd.DataFrame): def __init__(self, hypo, prior=1, **options): columns = ['hypo', 'prior', 'likelihood', 'unnorm', 'posterior'] super().__init__(columns=columns, **options) self.hypo = hypo self.prior = prior def mult(self): self.unnorm = self.prior * self.likelihood def norm(self): nc = np.sum(self.unnorm) self.posterior = self.unnorm / nc return nc def update(self): self.mult() return self.norm() def reset(self): return BayesTable(self.hypo, self.posterior) ###Output _____no_output_____ ###Markdown The double dice problemSuppose I have a box that contains one each of 4-sided, 6-sided, 8-sided, and 12-sided dice. I choose a die at random, and roll it twicewithout letting you see the die or the outcome. I report that I gotthe same outcome on both rolls.1) What is the posterior probability that I rolled each of the dice?2) If I roll the same die again, what is the probability that I get the same outcome a third time?**Solution**Here's a `BayesTable` that represents the four hypothetical dice. ###Code hypo = [Fraction(sides) for sides in [4, 6, 8, 12]] table = BayesTable(hypo) ###Output _____no_output_____ ###Markdown Since we didn't specify prior probabilities, the default value is equal priors for all hypotheses. 
They don't have to be normalized, because we have to normalize the posteriors anyway.Now we can specify the likelihoods: if a die has `n` sides, the chance of getting the same outcome twice is `1/n`.So the likelihoods are: ###Code table.likelihood = 1/table.hypo table ###Output _____no_output_____ ###Markdown Now we can use `update` to compute the posterior probabilities: ###Code table.update() table table.posterior.astype(float) ###Output _____no_output_____ ###Markdown The 4-sided die is most likely because you are more likely to get doubles on a 4-sided die than on a 6-, 8-, or 12- sided die. Part twoThe second part of the problem asks for the (posterior predictive) probability of getting the same outcome a third time, if we roll the same die again.If the die has `n` sides, the probability of getting the same value again is `1/n`, which should look familiar.To get the total probability of getting the same outcome, we have to add up the conditional probabilities:```P(n | data) * P(same outcome | n)```The first term is the posterior probability; the second term is `1/n`. ###Code total = 0 for _, row in table.iterrows(): total += row.posterior / row.hypo total ###Output _____no_output_____ ###Markdown This calculation is similar to the first step of the update, so we can also compute it by1) Creating a new table with the posteriors from `table`.2) Adding the likelihood of getting the same outcome a third time.3) Computing the normalizing constant. ###Code table2 = table.reset() table2.likelihood = 1/table.hypo table2 table2.update() table2 ###Output _____no_output_____
Bears.ipynb
###Markdown Creating our own dataset from Google Images In this tutorial we will see how to easily create an image dataset through Google Images. Note: We will have to repeat these steps for any new category we want to Google (e.g once for dogs and once for cats). ###Code from fastai.vision import * ###Output _____no_output_____ ###Markdown Create directory and upload urls file into your serverChoosing an appropriate name for our labeled images. We can run these steps multiple times to create different labels. ###Code !mkdir data !mkdir data/bears folder = 'black' file = 'blk.txt' folder = 'teddys' file = 'ted.txt' folder= 'brown' file= 'brwnbears.txt' path = Path('data/bears') dest = path/folder dest.mkdir(parents=True, exist_ok=True) path = Path('data/bears') ###Output _____no_output_____ ###Markdown Download imagesNow we will need to download our images from their respective urls.fast.ai has a function that allows us to do just that. We just have to specify the urls filename as well as the destination folder and this function will download and save all images that can be opened. If they have some problem in being opened, they will not be saved.Let's download our images! Notice we can choose a maximum number of images to be downloaded. In this case we will not download all the urls.We will need to run this line once for every category. 
###Code download_images(path/file, dest, max_pics=200) classes = ['teddys','brown','black'] for c in classes: print(c) verify_images(path/c, delete=True, max_size=500) ###Output teddys ###Markdown View data ###Code np.random.seed(42) data = ImageDataBunch.from_folder(path, train=".", valid_pct=0.2, ds_tfms=get_transforms(), size=224, num_workers=4).normalize(imagenet_stats) data.show_batch(rows=3, figsize=(7,8)) data.classes, data.c, len(data.train_ds), len(data.valid_ds) ###Output _____no_output_____ ###Markdown Train model ###Code learn = cnn_learner(data, models.resnet34, metrics=error_rate) learn.fit_one_cycle(5) learn.save('stage-1') learn.unfreeze() learn.lr_find() learn.recorder.plot() learn.fit_one_cycle(2, max_lr=slice(3e-5,3e-4)) learn.save('stage-2') ###Output _____no_output_____ ###Markdown Interpretation ###Code learn.load('stage-2') interp = ClassificationInterpretation.from_learner(learn) interp.plot_confusion_matrix() ###Output _____no_output_____ ###Markdown Putting your model in productionFirst thing first, let's export the content of our Learner object for production: ###Code learn.export() defaults.device = torch.device('cpu') img = open_image(path/'teddys'/'00000011.jpg') img learn = load_learner(path) pred_class,pred_idx,outputs = learn.predict(img) pred_class ###Output _____no_output_____
module_2/module2.ipynb
###Markdown Module 2: From `datascience` to `pandas` Why `pandas`?Like the `datascience` package you learned and used in Data 8, `pandas` is a Python library used for data manipulation and analysis.However, `datascience` was developed as a pedagogical tool for Data 8, intended to help students become familiarized with python syntax as well as syntax associated with tabular data analysis. We decided to teach `datascience` first since its syntax is more intuitive and easier to use for students without much programming experience.On the other hand, `pandas` is an industrial strength package that is used in most data analysis projects in the real world. Learning how to use pandas would also make your projects easier to understand for other data scientists and extend the scope of influence your projects may have.Now that you've completed Data 8, it's a good time to translate the functions you've learned to `pandas`. Throughout this notebook, we will go over `pandas` by showing similar functions in both the `datascience` syntax and `pandas` syntax, as well as introduce you to some of the other functionalities that `pandas` provides. We will first import the packages we need. The following 3 lines imports `pandas`, `datascience`, and `numpy` respectively. Note that we imported `pandas` as `pd`, which means that when we call functions in `pandas`, we will always reference them with `pd` first.In Data 8, you saw something similar when we imported `numpy` as `np`: functions like taking `np.mean` or `np.random.choice` are all from the `numpy` package. ###Code import pandas as pd # This is how we import pandas into the environment. Typically, we use pd to refer to all pandas modules for short. from datascience import * import numpy as np ###Output _____no_output_____ ###Markdown Creating a Table Reading in a DatasetMost of the time, the data we want to analyze will be in a separate file, typically as a `.csv` file. 
In this case, we want to read the files in and convert them into a tabular format. Using the `datascience` package, we will want to read in the file as a table. To do this, we use the function `Table.read_table(file_path)`. In the example below, the `baby.csv` file is in the same folder as this notebook, so the relative file path from this notebook to the csv file is just `baby.csv`. ###Code # datascience baby_tbl = Table.read_table("baby.csv") baby_tbl.show(5) ###Output _____no_output_____ ###Markdown The syntax for reading in csv files in `pandas` is almost identical. `pandas` has a specific function to read in csv files called `pd.read_csv(file_path)`, with the same relative file path as its argument. the `dataframe.head()` function will display the first 5 rows of the data frame by default. If you want to specify the number of rows displayed, you can use`dataframe.head(num_rows)`.Similarly, if you want to see the last few rows of the data frame, you can use `dataframe.tail(num_rows)`. Try it out for yourself! ###Code # pandas baby_df = pd.read_csv("baby.csv") baby_df.head(5) ###Output _____no_output_____ ###Markdown Creating a Table from ScratchOften times, data in tabular format is preferred for analysis. But what if you already have the data, just not in tabular format? For example, if your data is scattered in arrays and lists, you can use that to build your own table from scratch. Using the `datascience` package, we can first create a blank table with a call to Table(), then add in the arrays as columns to this blank table as shown below. ###Code # datascience flowers_tbl = Table().with_columns( 'Number of petals', make_array(8, 34, 5), 'Name', make_array('lotus', 'sunflower', 'rose'), 'Color', make_array('pink', 'yellow', 'red') ) flowers_tbl ###Output _____no_output_____ ###Markdown There are multiple ways to do this in `pandas`. One way is intuitively very similar to how we did it with `datascience`. 
Here we use a dictionary object to represent the data -- don't worry if you're not familiar with them. We pass into the `data` argument `{"colname1": column1, "colname2": column2, ...}`.Notably, we build the `DataFrame` by the columns, using each list as a column and associating each column with its appropriate name. ###Code # pandas - method 1 flowers_df = pd.DataFrame(data = {'Number of petals': [8, 34, 5], 'Name': ['lotus', 'sunflower', 'rose'], 'Color': ['pink', 'yellow', 'red']}) flowers_df ###Output _____no_output_____ ###Markdown Another way to build the same table from scratch is by building it with rows. With this method, the data should be a list of lists, where each inner list contains the entries of one row. You might also notice that there is now a second argument, `columns`. Since we are passing in rows, we do not have the column names inside the `data` argument. This is why we use the `columns` argument to specify column names. ###Code # pandas - method 2 flowers_df = pd.DataFrame(data = [[8, 'lotus', 'pink'], [34, 'sunflower', 'yellow'], [5, 'rose', 'red']], columns = ['Number of petals', 'Name', 'Color']) flowers_df ###Output _____no_output_____ ###Markdown `pandas` datatypes Series vs. ArraysOne of the primary data types we saw when analyzing tabular data with the `datascience` package is the array.In `datascience`, the columns of the tables consisted of arrays. In `pandas`, there is a very similar, but slightly different data type called a `Series`. You can access the values of a column in a Table using the `tbl.column(column_name)` function as follows. When using `tbl.column`, the values of the selected column will be returned as an array. ###Code ## datascience baby_tbl.column("Birth.Weight") ###Output _____no_output_____ ###Markdown Similarly in `pandas`, you can access the values of a particular column by using `dataframe[column_name]`. 
The data frame object is introduced in the next section, for now you can just understand it as the `pandas` equivalent to a table. `dataframe[column_name]` will return a `Series` instead of an array. ###Code ## pandas baby_df["Birth.Weight"] ###Output _____no_output_____ ###Markdown A `Series` object is basically an array with indices for each data point. In the above example, the first element in the `Birth.Weight` column is the integer 120. The corresponding index is 0.If we want just the data as an array without the index, we can use the `Series.values` attribute. ###Code baby_df["Birth.Weight"].values ###Output _____no_output_____ ###Markdown DataFrames vs. TablesThe following is our standard `datascience` Table. It is basically a collection of arrays, with column names. ###Code # datascience baby_tbl.head() ###Output _____no_output_____ ###Markdown A `pandas` `DataFrame` can be thought of as a collection of Series, all of which have the same index. The resulting `DataFrame` consists of columns where each column is a `Series` and each row has a unique index. ###Code # pandas baby_df.head() ###Output _____no_output_____ ###Markdown The number of rows in the `Table` can be found as such: ###Code # datascience baby_tbl.num_rows ###Output _____no_output_____ ###Markdown Similarly, for the number of columns: ###Code # datascience baby_tbl.num_columns ###Output _____no_output_____ ###Markdown The number of rows and columns in a `DataFrame` can be accessed together using the `.shape` attribute. Notice that the index is not counted as a column. ###Code # pandas baby_df.shape ###Output _____no_output_____ ###Markdown To get just the number of rows, we want the 0th element. ###Code # pandas baby_df.shape[0] ###Output _____no_output_____ ###Markdown For just the number of columns, we want the 1st element. ###Code # pandas baby_df.shape[1] ###Output _____no_output_____ ###Markdown Indices The row labels of a `DataFrame` are collectively called the index. 
It helps to identify each row. By default, the index values are the row numbers, with the first row having index 0. ###Code # pandas baby_df.head() ###Output _____no_output_____ ###Markdown We can access the index of a `DataFrame` by calling `DataFrame.index`. ###Code baby_df.index ###Output _____no_output_____ ###Markdown That doesn't seem too meaningful. We can access the values of the index using `.values`. ###Code # pandas baby_df.index.values ###Output _____no_output_____ ###Markdown In addition, we can set the index to whatever we want it to be. So, instead of index going from 0 to 1173, we can change it to go from 1 to 1174. ###Code # pandas baby_df.set_index(np.arange(1, 1175)) ###Output _____no_output_____ ###Markdown Let's look at another example. ###Code flowers_df ###Output _____no_output_____ ###Markdown The labels in an index of a `DataFrame` do not have to be intergers; they can also be strings. We can also use one of the data columns to be the index itself. Here is an example in which we are setting the index to be the `Name` column. ###Code # pandas flowers_df = flowers_df.set_index('Name') flowers_df ###Output _____no_output_____ ###Markdown Subsetting Data Selecting ColumnsSometimes the entire dataset contains too many columns, and we are only interested in some of the columns. In these situations, we would want to be able to select and display a subset of the columns from the original table. We dicuss some of these methods below. In the `datascience` package, the `select` function is used. If we want the returned columns to be a table as well, we use the syntax `tbl.select(col_name1, col_name2, ...)` ###Code # datascience # Selects the columns "Number of petals" and "Color". flowers_tbl.select("Number of petals", "Name") ###Output _____no_output_____ ###Markdown In `pandas`, there are many ways to achieve the same result. For one, we can use the function `loc`, shown below. 
The first argument of `loc` is which rows we want to select, and since we want all of the rows, just a colon ":" would indicate all rows. The second argument selects the columns we want. If we want more than one column, we need to pass in the column names as a list for the `loc` to return a dataframe object. ###Code # pandas # Selects all rows and the columns "Number of petals" and "Color". flowers_df.loc[:,["Number of petals", "Color"]] ###Output _____no_output_____ ###Markdown If you pass in a single column name as a list, it will still return a dataframe object with one column. ###Code # pandas # Selects all rows but only the column "Number of petals". Returns a DataFrame object. flowers_df.loc[:,["Number of petals"]] ###Output _____no_output_____ ###Markdown But if you pass in the column name as a string, pandas will recognize that you only have one column, and return a `Series` instead. ###Code # pandas # Selects all rows but only the column "Number of petals". Returns a series object. flowers_df.loc[:,"Number of petals"] ###Output _____no_output_____ ###Markdown Another way to subset data in `pandas` is to use `iloc`. Unlike `loc`, which accepts column names as arguments, `iloc` only accepts numerical indices as its arguments. The order of arguments remain the same, with the rows being the first argument and the columns being the second argument. Here the `0:1` denotes a range and means that we want all columns indexed 0 through 1. In Python, ranges are generally left inclusive and right exclusive (so that only column 0 is selected here). ###Code # pandas # Selects all rows and the 0th-1st column (not inclusive of the 1st column) flowers_df.iloc[:,0:1] ###Output _____no_output_____ ###Markdown As a side note, we can also subset tables' rows by selecting row indices. Since we've set the index of the flowers table to be the name of the flower, we can directly pass in the row indices as a list as the first argument. 
Note that here, `loc` is actually left and right inclusive. ###Code # pandas # Selects the rows with index values "sunflower" and "lotus", and all columns flowers_df.loc[["sunflower", "lotus"], :] # pandas # Selects the rows with index values from 0 to 3 (inclusive of 0 and 3), and all columns baby_df.loc[0:3,:] ###Output _____no_output_____ ###Markdown `loc` and `iloc` are very powerful functions in `pandas`. Here are 2 more examples on the `baby` table, let's see what they do: ###Code # pandas # Selects the 0th and 2nd column, and all rows of the table baby_df.iloc[:, [0,2]] ###Output _____no_output_____ ###Markdown If we want to only select a subset of columns from the table, there exists a special case short-cut where we drop the `.loc` and `:` entirely: ###Code # pandas # Selects the columns "Birth.Weight" and "Maternal.Age" with all rows baby_df[["Birth.Weight", "Maternal.Age"]] ###Output _____no_output_____ ###Markdown Getting a ValueWhat if you want to single out one entry of your entire table? This often occurs when we want to max or min value after sorting the table, for example:- *What is the name of the flower with the most number of petals?*- *How heavy was the baby that went through the longest gestational days?* When we want a specific entry using the `datascience` package, we need to first use `tbl.column` to fetch an array out of a table, then use `.item` to retrieve the element's value. In the code below, we get the birth weight of the first baby recorded in this dataset. ###Code # datascience # Get the first item from the "Birth.Weight" column baby_tbl.column("Birth.Weight").item(0) ###Output _____no_output_____ ###Markdown In `pandas`, the syntax for getting a single element is a lot less verbose. Remember `loc` and `iloc`? Since these functions have the ability to subset rows and columns at the same time, we are going to use that functionality here. 
We pass in 0 as the row selector, since we only want the first entry of the "Birth.Weight" column (the entry at the 0th index). ###Code # pandas # Get the value at row with index 0 and column with label "Birth.Weight". baby_df.loc[0,"Birth.Weight"] ###Output _____no_output_____ ###Markdown Similarly with iloc, we are just passing in 0 as the first and second argument since we want the entry located at the first row and first column, which are both indexed at 0. ###Code # pandas # Get the value at the 0th row and 0th column. baby_df.iloc[0,0] # pandas # Get the rows with indices 0 to 5 (inclusive) of the "Birth.Weight" column baby_df.loc[0:5, "Birth.Weight"] # pandas # Select the first five columns of the first two rows baby_df.iloc[0:2, 0:5] ###Output _____no_output_____ ###Markdown Methods Filtering and Boolean Indexing With the `datascience` package, we can filter a table by only returning rows that satisfy a specific condition. ###Code # datascience # Returns all of the rows where "Birth.Weight" is greater than 120 baby_tbl.where('Birth.Weight', are.above(120)) ###Output _____no_output_____ ###Markdown Equivalently, we can do this in `pandas` by "boolean indexing". The expression below returns a boolean series where an entry is `True` if it satisfies the condition and `False` if it doesn't. ###Code # pandas baby_df['Birth.Weight'] > 120 ###Output _____no_output_____ ###Markdown If we want to filter our `pandas` dataframe for all rows that satisfies Birth.Weight > 120, we can pass the boolean series into the row argument of `.loc`. The idea is that we only want the rows where the "boolean index" is `True`. ###Code # pandas # Select all rows that are True in the boolean series baby_df['Birth.Weight'] > 120. baby_df.loc[baby_df['Birth.Weight'] > 120, :] ###Output _____no_output_____ ###Markdown Notably, `.loc` returns all columns by default so we can omit the column argument and get the same result. 
###Code # pandas # Select all rows that are True in the boolean series baby_df['Birth.Weight'] > 120. baby_df.loc[baby_df['Birth.Weight'] > 120] ###Output _____no_output_____ ###Markdown Boolean indexing is a very popular way to conduct filtering in `pandas`. As such, there exists another special case short-ahnd where we don't need the `.loc` or the `:`. ###Code # pandas # Select all rows that are True in the boolean series baby_df['Birth.Weight'] > 120. baby_df[baby_df['Birth.Weight'] > 120] ###Output _____no_output_____ ###Markdown In general, a filtering expression of the form `tbl.where(column, predicate)` in the `datascience` library takes the form `df.loc[criterion]` in `pandas`. Here are a few more examples: ###Code # datascience # Return all rows where Maternal.Height is greater than or equal to 63. baby_tbl.where('Maternal.Height', are.above_or_equal_to(63)) # pandas # Return all rows where Maternal.Height is greater than or equal to 63. baby_df[baby_df['Maternal.Height'] >= 63] # datascience # Return all rows where Maternal.Smoker is True. baby_tbl.where('Maternal.Smoker', are.equal_to(True)) # pandas # Return all rows where Maternal.Smoker is True. baby_df.loc[baby_df['Maternal.Smoker'] == True] ###Output _____no_output_____ ###Markdown Filtering on Multiple Conditions We can also filter on multiple conditions. If we want records (rows) where all of the conditions are true, we separate our criterion by the `&` symbol, where `&` represents *and*.`df.loc[(boolean series 1) & (boolean series 2) & (boolean series 2)]`If we just want one of the conditions to be true, we separate our criterion by `|` symbols, where `|` represents *or*.`df.loc[(boolean series 1) | (boolean series 2) | (boolean series 2)]` ###Code # datascience # Return all rows where Gestational.Days is between 270 and 280. baby_tbl.where('Gestational.Days', are.between(270, 280)) # pandas # Select all rows where Gestational.Days are above or equal to 270, but less than 280. 
baby_df.loc[(baby_df['Gestational.Days'] >= 270) & (baby_df['Gestational.Days'] < 280)] ###Output _____no_output_____
ml/recommendation-systems/recommendation-systems.ipynb
###Markdown Copyright 2018 Google LLC. ###Code # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown Recommendation Systems with TensorFlowThis Colab notebook complements the course on [Recommendation Systems](https://developers.google.com/machine-learning/recommendation/). Specifically, we'll be using matrix factorization to learn user and movie embeddings. IntroductionWe will create a movie recommendation system based on the [MovieLens](https://movielens.org/) dataset available [here](http://grouplens.org/datasets/movielens/). The data consists of movies ratings (on a scale of 1 to 5). Outline 1. Exploring the MovieLens Data (10 minutes) 1. Preliminaries (25 minutes) 1. Training a matrix factorization model (15 minutes) 1. Inspecting the Embeddings (15 minutes) 1. Regularization in matrix factorization (15 minutes) 1. Softmax model training (30 minutes) SetupLet's get started by importing the required packages. ###Code # @title Imports (run this cell) from __future__ import print_function import numpy as np import pandas as pd import collections from mpl_toolkits.mplot3d import Axes3D from IPython import display from matplotlib import pyplot as plt import sklearn import sklearn.manifold import tensorflow.compat.v1 as tf tf.disable_v2_behavior() tf.logging.set_verbosity(tf.logging.ERROR) # Add some convenience functions to Pandas DataFrame. 
def mask(df, key, function):
  """Return the rows of `df` selected by applying `function` to column `key`.

  Args:
    df: the DataFrame to filter.
    key: name of the column whose values are fed to `function`.
    function: a callable mapping a Series to a boolean Series.

  Returns:
    The DataFrame restricted to the rows where the boolean Series is True.
  """
  selector = function(df[key])
  return df[selector]

def flatten_cols(df):
  """Collapse a MultiIndex column header into flat, space-joined labels.

  Mutates `df.columns` in place and returns `df` so calls can be chained.
  """
  flat_names = []
  for col in df.columns.values:
    flat_names.append(' '.join(col).strip())
  df.columns = flat_names
  return df
def mark_genres(movies, genres):
  """Derive 'genre' and 'all_genres' columns from the binary genre flags.

  'genre' holds one genre sampled uniformly at random from the movie's
  active genres, and 'all_genres' holds every active genre joined by '-'.
  A movie with no active genre gets 'Other' in both columns. Mutates
  `movies` in place.

  Args:
    movies: DataFrame with one 0/1 indicator column per genre name.
    genres: list of the genre column names, in column order.
  """
  def _active(flags):
    # Names of the genres whose indicator equals 1 for this movie.
    return [name for name, flag in zip(genres, flags) if flag == 1]

  def get_random_genre(gs):
    active = _active(gs)
    if not active:
      return 'Other'
    return np.random.choice(active)

  def get_all_genres(gs):
    active = _active(gs)
    if not active:
      return 'Other'
    return '-'.join(active)

  # One tuple of indicator flags per movie row.
  flag_rows = list(zip(*[movies[name] for name in genres]))
  movies['genre'] = [get_random_genre(gs) for gs in flag_rows]
  movies['all_genres'] = [get_all_genres(gs) for gs in flag_rows]
def split_dataframe(df, holdout_fraction=0.1):
  """Split a DataFrame into disjoint training and test sets.

  Args:
    df: a dataframe.
    holdout_fraction: fraction of dataframe rows sampled (without
      replacement) into the test set.

  Returns:
    train: dataframe for training.
    test: dataframe for testing.
  """
  test = df.sample(frac=holdout_fraction, replace=False)
  # Everything not drawn into the test sample stays in the training set.
  in_test = df.index.isin(test.index)
  train = df[~in_test]
  return train, test
def filtered_hist(field, label, filter):
  """Creates a layered chart of histograms.

  The first layer (light gray) contains the histogram of the full data, and
  the second contains the histogram of the filtered data.

  Args:
    field: the field for which to generate the histogram.
    label: String label of the histogram.
    filter: an alt.Selection object to be used to filter the data.
  """
  # Shared histogram spec; both layers bin `field` into at most 10 buckets.
  base = alt.Chart().mark_bar().encode(
      x=alt.X(field, bin=alt.Bin(maxbins=10), title=label),
      y="count()",
  ).properties(
      width=300,
  )
  filtered_layer = base.transform_filter(filter)
  full_data_layer = base.encode(
      color=alt.value('lightgray'), opacity=alt.value(.7))
  return alt.layer(
      filtered_layer,
      full_data_layer,
  ).resolve_scale(y='independent')
###Code movies_ratings = movies.merge( ratings .groupby('movie_id', as_index=False) .agg({'rating': ['count', 'mean']}) .flatten_cols(), on='movie_id') genre_filter = alt.selection_multi(fields=['genre']) genre_chart = alt.Chart().mark_bar().encode( x="count()", y=alt.Y('genre'), color=alt.condition( genre_filter, alt.Color("genre:N"), alt.value('lightgray')) ).properties(height=300, selection=genre_filter) (movies_ratings[['title', 'rating count', 'rating mean']] .sort_values('rating count', ascending=False) .head(10)) (movies_ratings[['title', 'rating count', 'rating mean']] .mask('rating count', lambda x: x > 20) .sort_values('rating mean', ascending=False) .head(10)) ###Output _____no_output_____ ###Markdown Finally, the last chart shows the distribution of the number of ratings and average rating. ###Code # Display the number of ratings and average rating per movie. alt.hconcat( filtered_hist('rating count', '# ratings / movie', genre_filter), filtered_hist('rating mean', 'mean movie rating', genre_filter), genre_chart, data=movies_ratings) ###Output _____no_output_____ ###Markdown II. PreliminariesOur goal is to factorize the ratings matrix $A$ into the product of a user embedding matrix $U$ and movie embedding matrix $V$, such that $A \approx UV^\top$ with$U = \begin{bmatrix} u_{1} \\ \hline \vdots \\ \hline u_{N} \end{bmatrix}$ and$V = \begin{bmatrix} v_{1} \\ \hline \vdots \\ \hline v_{M} \end{bmatrix}$.Here- $N$ is the number of users,- $M$ is the number of movies,- $A_{ij}$ is the rating of the $j$th movies by the $i$th user,- each row $U_i$ is a $d$-dimensional vector (embedding) representing user $i$,- each row $V_j$ is a $d$-dimensional vector (embedding) representing movie $j$,- the prediction of the model for the $(i, j)$ pair is the dot product $\langle U_i, V_j \rangle$. 
def build_rating_sparse_tensor(ratings_df):
  """
  Args:
    ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating`
      columns.
  Returns:
    a tf.SparseTensor representing the ratings matrix.
  """
  # Each observed rating contributes one (user, movie) index pair.
  pair_indices = ratings_df[['user_id', 'movie_id']].values
  rating_values = ratings_df['rating'].values
  # The dense shape covers every user and movie, not just the rated pairs.
  full_shape = [users.shape[0], movies.shape[0]]
  return tf.SparseTensor(
      indices=pair_indices,
      values=rating_values,
      dense_shape=full_shape)
""" # ========================= Complete this section ============================ # loss = # ============================================================================ return loss #@title Solution def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings): """ Args: sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M] user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding dimension, such that U_i is the embedding of user i. movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding dimension, such that V_j is the embedding of movie j. Returns: A scalar Tensor representing the MSE between the true ratings and the model's predictions. """ predictions = tf.gather_nd( tf.matmul(user_embeddings, movie_embeddings, transpose_b=True), sparse_ratings.indices) loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions) return loss ###Output _____no_output_____ ###Markdown Note: One approach is to compute the full prediction matrix $UV^\top$, then gather the entries corresponding to the observed pairs. The memory cost of this approach is $O(NM)$. For the MovieLens dataset, this is fine, as the dense $N \times M$ matrix is small enough to fit in memory ($N = 943$, $M = 1682$).Another approach (given in the alternate solution below) is to only gather the embeddings of the observed pairs, then compute their dot products. The memory cost is $O(|\Omega| d)$ where $d$ is the embedding dimension. In our case, $|\Omega| = 10^5$, and the embedding dimension is on the order of $10$, so the memory cost of both methods is comparable. But when the number of users or movies is much larger, the first approach becomes infeasible. 
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
  """
  Args:
    sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
    user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
      dimension, such that U_i is the embedding of user i.
    movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
      dimension, such that V_j is the embedding of movie j.
  Returns:
    A scalar Tensor representing the MSE between the true ratings and the
      model's predictions.
  """
  # Gather only the embeddings of observed pairs: O(|Omega| * k) memory
  # instead of materializing the full N x M prediction matrix.
  user_rows = tf.gather(user_embeddings, sparse_ratings.indices[:, 0])
  movie_rows = tf.gather(movie_embeddings, sparse_ratings.indices[:, 1])
  # Row-wise dot product gives the predicted rating for each observed pair.
  predictions = tf.reduce_sum(user_rows * movie_rows, axis=1)
  return tf.losses.mean_squared_error(sparse_ratings.values, predictions)
try: sh = gc.open('MovieLens-test') except(gspread.SpreadsheetNotFound): sh = gc.create('MovieLens-test') worksheet = sh.sheet1 titles = movies['title'].values cell_list = worksheet.range(1, 1, len(titles), 1) for cell, title in zip(cell_list, titles): cell.value = title worksheet.update_cells(cell_list) print("Link to the spreadsheet: " "https://docs.google.com/spreadsheets/d/{}/edit".format(sh.id)) ###Output _____no_output_____ ###Markdown Run the next cell to load your ratings and add them to the main `ratings` DataFrame. ###Code # @title Run to load your ratings. # Load the ratings from the spreadsheet and create a DataFrame. if USER_RATINGS: my_ratings = pd.DataFrame.from_records(worksheet.get_all_values()).reset_index() my_ratings = my_ratings[my_ratings[1] != ''] my_ratings = pd.DataFrame({ 'user_id': "943", 'movie_id': list(map(str, my_ratings['index'])), 'rating': list(map(float, my_ratings[1])), }) # Remove previous ratings. ratings = ratings[ratings.user_id != "943"] # Add new ratings. ratings = ratings.append(my_ratings, ignore_index=True) # Add new user to the users DataFrame. if users.shape[0] == 943: users = users.append(users.iloc[942], ignore_index=True) users["user_id"][943] = "943" print("Added your %d ratings; you have great taste!" % len(my_ratings)) ratings[ratings.user_id=="943"].merge(movies[['movie_id', 'title']]) ###Output _____no_output_____ ###Markdown III. Training a Matrix Factorization model CFModel (Collaborative Filtering Model) helper classThis is a simple class to train a matrix factorization model using stochastic gradient descent.The class constructor takes- the user embeddings U (a `tf.Variable`).- the movie embeddings V, (a `tf.Variable`).- a loss to optimize (a `tf.Tensor`).- an optional list of metrics dictionaries, each mapping a string (the name of the metric) to a tensor. These are evaluated and plotted during training (e.g. 
class CFModel(object):
  """Simple class that represents a collaborative filtering model.

  Wraps a pair of embedding tf.Variables and a loss Tensor, and trains them
  with stochastic gradient descent in a persistent TF1 Session. After
  training, the learned embeddings are available (as numpy arrays) through
  the `embeddings` property.
  """
  def __init__(self, embedding_vars, loss, metrics=None):
    """Initializes a CFModel.
    Args:
      embedding_vars: A dictionary of tf.Variables.
      loss: A float Tensor. The loss to optimize.
      metrics: optional list of dictionaries of Tensors. The metrics in each
        dictionary will be plotted in a separate figure during training.
        NOTE(review): `train` iterates `self._metrics` directly, so passing
        the default None presumably fails there — confirm callers always
        supply a list.
    """
    self._embedding_vars = embedding_vars
    self._loss = loss
    self._metrics = metrics
    # Filled with evaluated numpy arrays (one per variable) after train().
    self._embeddings = {k: None for k in embedding_vars}
    # Session is created lazily on the first call to train() and reused,
    # so repeated train() calls continue from the current variable values.
    self._session = None

  @property
  def embeddings(self):
    """The embeddings dictionary (variable name -> evaluated embedding)."""
    return self._embeddings

  def train(self, num_iterations=100, learning_rate=1.0, plot_results=True,
            optimizer=tf.train.GradientDescentOptimizer):
    """Trains the model.
    Args:
      num_iterations: number of iterations to run.
      learning_rate: optimizer learning rate.
      plot_results: whether to plot the results at the end of training.
      optimizer: the optimizer to use. Default to GradientDescentOptimizer.
    Returns:
      The metrics dictionary evaluated at the last iteration.
    """
    with self._loss.graph.as_default():
      opt = optimizer(learning_rate)
      train_op = opt.minimize(self._loss)
      # Re-initializes the optimizer's slot variables (and local variables)
      # on every train() call, without touching the model embeddings.
      local_init_op = tf.group(
          tf.variables_initializer(opt.variables()),
          tf.local_variables_initializer())
      if self._session is None:
        # First call: create the session and initialize all global variables.
        self._session = tf.Session()
        with self._session.as_default():
          self._session.run(tf.global_variables_initializer())
          self._session.run(tf.tables_initializer())
          tf.train.start_queue_runners()

    with self._session.as_default():
      local_init_op.run()
      iterations = []
      metrics = self._metrics or ({},)
      metrics_vals = [collections.defaultdict(list) for _ in self._metrics]

      # Train and append results.
      for i in range(num_iterations + 1):
        _, results = self._session.run((train_op, metrics))
        if (i % 10 == 0) or i == num_iterations:
          # Overwrite the same console line with the latest metric values.
          print("\r iteration %d: " % i + ", ".join(
                ["%s=%f" % (k, v) for r in results for k, v in r.items()]),
                end='')
          iterations.append(i)
          for metric_val, result in zip(metrics_vals, results):
            for k, v in result.items():
              metric_val[k].append(v)

      # Snapshot the trained embeddings as numpy arrays.
      for k, v in self._embedding_vars.items():
        self._embeddings[k] = v.eval()

      if plot_results:
        # Plot the metrics.
        num_subplots = len(metrics)+1
        fig = plt.figure()
        fig.set_size_inches(num_subplots*10, 8)
        for i, metric_vals in enumerate(metrics_vals):
          ax = fig.add_subplot(1, num_subplots, i+1)
          for k, v in metric_vals.items():
            ax.plot(iterations, v, label=k)
          ax.set_xlim([1, num_iterations])
          ax.legend()
      return results
train_ratings, test_ratings = split_dataframe(ratings) # SparseTensor representation of the train and test datasets. # ========================= Complete this section ============================ # A_train = # A_test = # ============================================================================ # Initialize the embeddings using a normal distribution. U = tf.Variable(tf.random_normal( [A_train.dense_shape[0], embedding_dim], stddev=init_stddev)) V = tf.Variable(tf.random_normal( [A_train.dense_shape[1], embedding_dim], stddev=init_stddev)) # ========================= Complete this section ============================ # train_loss = # test_loss = # ============================================================================ metrics = { 'train_error': train_loss, 'test_error': test_loss } embeddings = { "user_id": U, "movie_id": V } return CFModel(embeddings, train_loss, [metrics]) #@title Solution def build_model(ratings, embedding_dim=3, init_stddev=1.): """ Args: ratings: a DataFrame of the ratings embedding_dim: the dimension of the embedding vectors. init_stddev: float, the standard deviation of the random initial embeddings. Returns: model: a CFModel. """ # Split the ratings DataFrame into train and test. train_ratings, test_ratings = split_dataframe(ratings) # SparseTensor representation of the train and test datasets. A_train = build_rating_sparse_tensor(train_ratings) A_test = build_rating_sparse_tensor(test_ratings) # Initialize the embeddings using a normal distribution. 
U = tf.Variable(tf.random_normal( [A_train.dense_shape[0], embedding_dim], stddev=init_stddev)) V = tf.Variable(tf.random_normal( [A_train.dense_shape[1], embedding_dim], stddev=init_stddev)) train_loss = sparse_mean_square_error(A_train, U, V) test_loss = sparse_mean_square_error(A_test, U, V) metrics = { 'train_error': train_loss, 'test_error': test_loss } embeddings = { "user_id": U, "movie_id": V } return CFModel(embeddings, train_loss, [metrics]) ###Output _____no_output_____ ###Markdown Great, now it's time to train the model!Go ahead and run the next cell, trying different parameters (embedding dimension, learning rate, iterations). The training and test errors are plotted at the end of training. You can inspect these values to validate the hyper-parameters.Note: by calling `model.train` again, the model will continue training starting from the current values of the embeddings. ###Code # Build the CF model and train it. model = build_model(ratings, embedding_dim=30, init_stddev=0.5) model.train(num_iterations=1000, learning_rate=10.) ###Output _____no_output_____ ###Markdown The movie and user embeddings are also displayed in the right figure. When the embedding dimension is greater than 3, the embeddings are projected on the first 3 dimensions. The next section will have a more detailed look at the embeddings. IV. Inspecting the EmbeddingsIn this section, we take a closer look at the learned embeddings, by- computing your recommendations- looking at the nearest neighbors of some movies,- looking at the norms of the movie embeddings,- visualizing the embedding in a projected embedding space. Exercise 5: Write a function that computes the scores of the candidatesWe start by writing a function that, given a query embedding $u \in \mathbb R^d$ and item embeddings $V \in \mathbb R^{N \times d}$, computes the item scores.As discussed in the lecture, there are different similarity measures we can use, and these can yield different results. 
#@title Solution
DOT = 'dot'
COSINE = 'cosine'

def compute_scores(query_embedding, item_embeddings, measure=DOT):
  """Scores every candidate item against a single query embedding.

  Args:
    query_embedding: a vector of shape [k], representing the query embedding.
    item_embeddings: a matrix of shape [N, k], such that row i is the
      embedding of item i.
    measure: a string specifying the similarity measure to be used. Can be
      either DOT or COSINE.
  Returns:
    scores: a vector of shape [N], such that scores[i] is the score of item i.
  """
  query = query_embedding
  items = item_embeddings
  if measure == COSINE:
    # Normalizing both sides turns the dot product into cosine similarity.
    query = query / np.linalg.norm(query)
    items = items / np.linalg.norm(items, axis=1, keepdims=True)
  return np.dot(items, query)
Other candidates: {}]".format( ", ".join(titles[1:]))) movie_id = ids[0] scores = compute_scores( model.embeddings["movie_id"][movie_id], model.embeddings["movie_id"], measure) score_key = measure + ' score' df = pd.DataFrame({ score_key: list(scores), 'titles': movies['title'], 'genres': movies['all_genres'] }) display.display(df.sort_values([score_key], ascending=False).head(k)) ###Output _____no_output_____ ###Markdown Your recommendationsIf you chose to input your recommendations, you can run the next cell to generate recommendations for you. ###Code user_recommendations(model, measure=COSINE, k=5) ###Output _____no_output_____ ###Markdown How do the recommendations look? Movie Nearest neighborsLet's look at the neareast neighbors for some of the movies. ###Code movie_neighbors(model, "Aladdin", DOT) movie_neighbors(model, "Aladdin", COSINE) ###Output _____no_output_____ ###Markdown It seems that the quality of learned embeddings may not be very good. This will be addressed in Section V by adding several regularization techniques. First, we will further inspect the embeddings. Movie Embedding NormWe can also observe that the recommendations with dot-product and cosine are different: with dot-product, the model tends to recommend popular movies. This can be explained by the fact that in matrix factorization models, the norm of the embedding is often correlated with popularity (popular movies have a larger norm), which makes it more likely to recommend more popular items. We can confirm this hypothesis by sorting the movies by their embedding norm, as done in the next cell. ###Code # @title Embedding Visualization code (run this cell) def movie_embedding_norm(models): """Visualizes the norm and number of ratings of the movie embeddings. Args: model: A MFModel object. 
""" if not isinstance(models, list): models = [models] df = pd.DataFrame({ 'title': movies['title'], 'genre': movies['genre'], 'num_ratings': movies_ratings['rating count'], }) charts = [] brush = alt.selection_interval() for i, model in enumerate(models): norm_key = 'norm'+str(i) df[norm_key] = np.linalg.norm(model.embeddings["movie_id"], axis=1) nearest = alt.selection( type='single', encodings=['x', 'y'], on='mouseover', nearest=True, empty='none') base = alt.Chart().mark_circle().encode( x='num_ratings', y=norm_key, color=alt.condition(brush, alt.value('#4c78a8'), alt.value('lightgray')) ).properties( selection=nearest).add_selection(brush) text = alt.Chart().mark_text(align='center', dx=5, dy=-5).encode( x='num_ratings', y=norm_key, text=alt.condition(nearest, 'title', alt.value(''))) charts.append(alt.layer(base, text)) return alt.hconcat(*charts, data=df) def visualize_movie_embeddings(data, x, y): nearest = alt.selection( type='single', encodings=['x', 'y'], on='mouseover', nearest=True, empty='none') base = alt.Chart().mark_circle().encode( x=x, y=y, color=alt.condition(genre_filter, "genre", alt.value("whitesmoke")), ).properties( width=600, height=600, selection=nearest) text = alt.Chart().mark_text(align='left', dx=5, dy=-5).encode( x=x, y=y, text=alt.condition(nearest, 'title', alt.value(''))) return alt.hconcat(alt.layer(base, text), genre_chart, data=data) def tsne_movie_embeddings(model): """Visualizes the movie embeddings, projected using t-SNE with Cosine measure. Args: model: A MFModel object. 
""" tsne = sklearn.manifold.TSNE( n_components=2, perplexity=40, metric='cosine', early_exaggeration=10.0, init='pca', verbose=True, n_iter=400) print('Running t-SNE...') V_proj = tsne.fit_transform(model.embeddings["movie_id"]) movies.loc[:,'x'] = V_proj[:, 0] movies.loc[:,'y'] = V_proj[:, 1] return visualize_movie_embeddings(movies, 'x', 'y') movie_embedding_norm(model) ###Output _____no_output_____ ###Markdown Note: Depending on how the model is initialized, you may observe that some niche movies (ones with few ratings) have a high norm, leading to spurious recommendations. This can happen if the embedding of that movie happens to be initialized with a high norm. Then, because the movie has few ratings, it is infrequently updated, and can keep its high norm. This will be alleviated by using regularization.Try changing the value of the hyper-parameter `init_stddev`. One quantity that can be helpful is that the expected norm of a $d$-dimensional vector with entries $\sim \mathcal N(0, \sigma^2)$ is approximatley $\sigma \sqrt d$.How does this affect the embedding norm distribution, and the ranking of the top-norm movies? ###Code #@title Solution model_lowinit = build_model(ratings, embedding_dim=30, init_stddev=0.05) model_lowinit.train(num_iterations=1000, learning_rate=10.) movie_neighbors(model_lowinit, "Aladdin", DOT) movie_neighbors(model_lowinit, "Aladdin", COSINE) movie_embedding_norm([model, model_lowinit]) ###Output _____no_output_____ ###Markdown Embedding visualizationSince it is hard to visualize embeddings in a higher-dimensional space (when the embedding dimension $k > 3$), one approach is to project the embeddings to a lower dimensional space. T-SNE (T-distributed Stochastic Neighbor Embedding) is an algorithm that projects the embeddings while attempting to preserve their pariwise distances. It can be useful for visualization, but one should use it with care. 
For more information on using t-SNE, see [How to Use t-SNE Effectively](https://distill.pub/2016/misread-tsne/). ###Code tsne_movie_embeddings(model_lowinit) ###Output _____no_output_____ ###Markdown You can highlight the embeddings of a given genre by clicking on the genres panel (SHIFT+click to select multiple genres).We can observe that the embeddings do not seem to have any notable structure, and the embeddings of a given genre are located all over the embedding space. This confirms the poor quality of the learned embeddings. One of the main reasons, which we will address in the next section, is that we only trained the model on observed pairs, and without regularization. V. Regularization In Matrix FactorizationIn the previous section, our loss was defined as the mean squared error on the observed part of the rating matrix. As discussed in the lecture, this can be problematic as the model does not learn how to place the embeddings of irrelevant movies. This phenomenon is known as *folding*.We will add regularization terms that will address this issue. We will use two types of regularization:- Regularization of the model parameters. This is a common $\ell_2$ regularization term on the embedding matrices, given by $r(U, V) = \frac{1}{N} \sum_i \|U_i\|^2 + \frac{1}{M}\sum_j \|V_j\|^2$.- A global prior that pushes the prediction of any pair towards zero, called the *gravity* term. This is given by $g(U, V) = \frac{1}{MN} \sum_{i = 1}^N \sum_{j = 1}^M \langle U_i, V_j \rangle^2$.The total loss is then given by$$\frac{1}{|\Omega|}\sum_{(i, j) \in \Omega} (A_{ij} - \langle U_i, V_j\rangle)^2 + \lambda _r r(U, V) + \lambda_g g(U, V)$$where $\lambda_r$ and $\lambda_g$ are two regularization coefficients (hyper-parameters). Exercise 6: Build a regularized Matrix Factorization model and train itWrite a function that builds a regularized model. You are given a function `gravity(U, V)` that computes the gravity term given the two embedding matrices $U$ and $V$. 
###Code def gravity(U, V): """Creates a gravity loss given two embedding matrices.""" return 1. / (U.shape[0].value*V.shape[0].value) * tf.reduce_sum( tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True)) def build_regularized_model( ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1., init_stddev=0.1): """ Args: ratings: the DataFrame of movie ratings. embedding_dim: The dimension of the embedding space. regularization_coeff: The regularization coefficient lambda. gravity_coeff: The gravity regularization coefficient lambda_g. Returns: A CFModel object that uses a regularized loss. """ # Split the ratings DataFrame into train and test. train_ratings, test_ratings = split_dataframe(ratings) # SparseTensor representation of the train and test datasets. A_train = build_rating_sparse_tensor(train_ratings) A_test = build_rating_sparse_tensor(test_ratings) U = tf.Variable(tf.random_normal( [A_train.dense_shape[0], embedding_dim], stddev=init_stddev)) V = tf.Variable(tf.random_normal( [A_train.dense_shape[1], embedding_dim], stddev=init_stddev)) # ========================= Complete this section ============================ # error_train = # error_test = # gravity_loss = # regularization_loss = # ============================================================================ total_loss = error_train + regularization_loss + gravity_loss losses = { 'train_error': error_train, 'test_error': error_test, } loss_components = { 'observed_loss': error_train, 'regularization_loss': regularization_loss, 'gravity_loss': gravity_loss, } embeddings = {"user_id": U, "movie_id": V} return CFModel(embeddings, total_loss, [losses, loss_components]) # @title Solution def gravity(U, V): """Creates a gravity loss given two embedding matrices.""" return 1. 
def build_regularized_model(
    ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1.,
    init_stddev=0.1):
  """Builds a matrix-factorization CFModel with L2 and gravity regularization.

  Args:
    ratings: the DataFrame of movie ratings.
    embedding_dim: The dimension of the embedding space.
    regularization_coeff: The regularization coefficient lambda.
    gravity_coeff: The gravity regularization coefficient lambda_g.
    init_stddev: float, the standard deviation of the random initial
      embeddings.
  Returns:
    A CFModel object that uses a regularized loss.
  """
  # Split the ratings DataFrame into train and test.
  train_ratings, test_ratings = split_dataframe(ratings)
  # SparseTensor representation of the train and test datasets.
  A_train = build_rating_sparse_tensor(train_ratings)
  A_test = build_rating_sparse_tensor(test_ratings)
  # User (U) and movie (V) embeddings, initialized from N(0, init_stddev^2).
  U = tf.Variable(tf.random_normal(
      [A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
  V = tf.Variable(tf.random_normal(
      [A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
  # MSE over the observed entries only; test error is reported, not trained on.
  error_train = sparse_mean_square_error(A_train, U, V)
  error_test = sparse_mean_square_error(A_test, U, V)
  # Gravity term pushes predictions of all (user, movie) pairs toward zero.
  gravity_loss = gravity_coeff * gravity(U, V)
  # Standard L2 penalty, averaged over the number of rows of each matrix.
  regularization_loss = regularization_coeff * (
      tf.reduce_sum(U*U)/U.shape[0].value + tf.reduce_sum(V*V)/V.shape[0].value)
  total_loss = error_train + regularization_loss + gravity_loss
  losses = {
      'train_error_observed': error_train,
      'test_error_observed': error_test,
  }
  loss_components = {
      'observed_loss': error_train,
      'regularization_loss': regularization_loss,
      'gravity_loss': gravity_loss,
  }
  embeddings = {"user_id": U, "movie_id": V}
  # Only total_loss is optimized; both dicts are plotted during training.
  return CFModel(embeddings, total_loss, [losses, loss_components])
###Code reg_model = build_regularized_model( ratings, regularization_coeff=0.1, gravity_coeff=1.0, embedding_dim=35, init_stddev=.05) reg_model.train(num_iterations=2000, learning_rate=20.) ###Output _____no_output_____ ###Markdown Observe that adding the regularization terms results in a higher MSE, both on the training and test set. However, as we will see, the quality of the recommendations improves. This highlights a tension between fitting the observed data and minimizing the regularization terms. Fitting the observed data often emphasizes learning high similarity (between items with many interactions), but a good embedding representation also requires learning low similarity (between items with few or no interactions). Inspect the resultsLet's see if the results with regularization look better. ###Code user_recommendations(reg_model, DOT, exclude_rated=True, k=10) ###Output _____no_output_____ ###Markdown Hopefully, these recommendations look better. You can change the similarity measure from COSINE to DOT and observe how this affects the recommendations.Since the model is likely to recommend items that you rated highly, you have the option to exclude the items you rated, using `exclude_rated=True`.In the following cells, we display the nearest neighbors, the embedding norms, and the t-SNE projection of the movie embeddings. ###Code movie_neighbors(reg_model, "Aladdin", DOT) movie_neighbors(reg_model, "Aladdin", COSINE) ###Output _____no_output_____ ###Markdown Here we compare the embedding norms for `model` and `reg_model`. Selecting a subset of the embeddings will highlight them on both charts simultaneously. ###Code movie_embedding_norm([model, model_lowinit, reg_model]) # Visualize the embeddings tsne_movie_embeddings(reg_model) ###Output _____no_output_____ ###Markdown We should observe that the embeddings have a lot more structure than the unregularized case. 
Try selecting different genres and observe how they tend to form clusters (for example Horror, Animation and Children). ConclusionThis concludes this section on matrix factorization models. Note that while the scale of the problem is small enough to allow efficient training using SGD, many practical problems need to be trained using more specialized algorithms such as Alternating Least Squares (see [tf.contrib.factorization.WALSMatrixFactorization](https://www.tensorflow.org/api_docs/python/tf/contrib/factorization/WALSMatrixFactorization) for a TF implementation). VI. Softmax modelIn this section, we will train a simple softmax model that predicts whether a given user has rated a movie.**Note**: if you are taking the self-study version of the class, make sure to read through the part of the class covering Softmax training before working on this part.The model will take as input a feature vector $x$ representing the list of movies the user has rated. We start from the ratings DataFrame, which we group by user_id. ###Code rated_movies = (ratings[["user_id", "movie_id"]] .groupby("user_id", as_index=False) .aggregate(lambda x: list(x))) rated_movies.head() ###Output _____no_output_____ ###Markdown We then create a function that generates an example batch, such that each example contains the following features:- movie_id: A tensor of strings of the movie ids that the user rated.- genre: A tensor of strings of the genres of those movies- year: A tensor of strings of the release year. ###Code #@title Batch generation code (run this cell) years_dict = { movie: year for movie, year in zip(movies["movie_id"], movies["year"]) } genres_dict = { movie: genres.split('-') for movie, genres in zip(movies["movie_id"], movies["all_genres"]) } def make_batch(ratings, batch_size): """Creates a batch of examples. Args: ratings: A DataFrame of ratings such that examples["movie_id"] is a list of movies rated by a user. batch_size: The batch size. 
def select_random(x):
  """Selects one uniformly random non-negative entry from each row of x.

  Rows are padded with -1 (see make_batch, which pads "label" with -1), so
  only the first `count_nonzero(x >= 0)` columns of a row hold real values;
  the random column index is drawn from that prefix.
  """
  def to_float(x):
    return tf.cast(x, tf.float32)
  def to_int(x):
    return tf.cast(x, tf.int64)
  batch_size = tf.shape(x)[0]
  # One row index per example in the batch.
  rn = tf.range(batch_size)
  # Number of valid (non-padding) entries per row.
  nnz = to_float(tf.count_nonzero(x >= 0, axis=1))
  rnd = tf.random_uniform([batch_size])
  # (row, floor(nnz * rnd)) picks a random valid column in each row.
  ids = tf.stack([to_int(rn), to_int(nnz * rnd)], axis=1)
  return to_int(tf.gather_nd(x, ids))
# @title Solution
def softmax_loss(user_embeddings, movie_embeddings, labels):
  """Returns the cross-entropy loss of the softmax model.
  Args:
    user_embeddings: A tensor of shape [batch_size, embedding_dim].
    movie_embeddings: A tensor of shape [num_movies, embedding_dim].
    labels: A tensor of [batch_size], such that labels[i] is the target label
      for example i.
  Returns:
    The mean cross-entropy loss.
  """
  # Verify that the embeddings have compatible dimensions.
  user_emb_dim = user_embeddings.shape[1].value
  movie_emb_dim = movie_embeddings.shape[1].value
  if user_emb_dim != movie_emb_dim:
    raise ValueError(
        "The user embedding dimension %d should match the movie embedding "
        "dimension % d" % (user_emb_dim, movie_emb_dim))
  # logits[i, j] = <psi(x_i), V_j>; the softmax over j is folded into the
  # cross-entropy op below for numerical stability.
  logits = tf.matmul(user_embeddings, movie_embeddings, transpose_b=True)
  loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels))
  return loss
""" def create_network(features): """Maps input features dictionary to user embeddings. Args: features: A dictionary of input string tensors. Returns: outputs: A tensor of shape [batch_size, embedding_dim]. """ # Create a bag-of-words embedding for each sparse feature. inputs = tf.feature_column.input_layer(features, embedding_cols) # Hidden layers. input_dim = inputs.shape[1].value for i, output_dim in enumerate(hidden_dims): w = tf.get_variable( "hidden%d_w_" % i, shape=[input_dim, output_dim], initializer=tf.truncated_normal_initializer( stddev=1./np.sqrt(output_dim))) / 10. outputs = tf.matmul(inputs, w) input_dim = output_dim inputs = outputs return outputs train_rated_movies, test_rated_movies = split_dataframe(rated_movies) train_batch = make_batch(train_rated_movies, 200) test_batch = make_batch(test_rated_movies, 100) with tf.variable_scope("model", reuse=False): # Train train_user_embeddings = create_network(train_batch) train_labels = select_random(train_batch["label"]) with tf.variable_scope("model", reuse=True): # Test test_user_embeddings = create_network(test_batch) test_labels = select_random(test_batch["label"]) movie_embeddings = tf.get_variable( "input_layer/movie_id_embedding/embedding_weights") # ========================= Complete this section ============================ # train_loss = # test_loss = # test_precision_at_10 = # ============================================================================ metrics = ( {"train_loss": train_loss, "test_loss": test_loss}, {"test_precision_at_10": test_precision_at_10} ) embeddings = {"movie_id": movie_embeddings} return CFModel(embeddings, train_loss, metrics) # @title Solution def build_softmax_model(rated_movies, embedding_cols, hidden_dims): """Builds a Softmax model for MovieLens. Args: rated_movies: DataFrame of traing examples. embedding_cols: A dictionary mapping feature names (string) to embedding column objects. This will be used in tf.feature_column.input_layer() to create the input layer. 
hidden_dims: int list of the dimensions of the hidden layers. Returns: A CFModel object. """ def create_network(features): """Maps input features dictionary to user embeddings. Args: features: A dictionary of input string tensors. Returns: outputs: A tensor of shape [batch_size, embedding_dim]. """ # Create a bag-of-words embedding for each sparse feature. inputs = tf.feature_column.input_layer(features, embedding_cols) # Hidden layers. input_dim = inputs.shape[1].value for i, output_dim in enumerate(hidden_dims): w = tf.get_variable( "hidden%d_w_" % i, shape=[input_dim, output_dim], initializer=tf.truncated_normal_initializer( stddev=1./np.sqrt(output_dim))) / 10. outputs = tf.matmul(inputs, w) input_dim = output_dim inputs = outputs return outputs train_rated_movies, test_rated_movies = split_dataframe(rated_movies) train_batch = make_batch(train_rated_movies, 200) test_batch = make_batch(test_rated_movies, 100) with tf.variable_scope("model", reuse=False): # Train train_user_embeddings = create_network(train_batch) train_labels = select_random(train_batch["label"]) with tf.variable_scope("model", reuse=True): # Test test_user_embeddings = create_network(test_batch) test_labels = select_random(test_batch["label"]) movie_embeddings = tf.get_variable( "input_layer/movie_id_embedding/embedding_weights") test_loss = softmax_loss( test_user_embeddings, movie_embeddings, test_labels) train_loss = softmax_loss( train_user_embeddings, movie_embeddings, train_labels) _, test_precision_at_10 = tf.metrics.precision_at_k( labels=test_labels, predictions=tf.matmul(test_user_embeddings, movie_embeddings, transpose_b=True), k=10) metrics = ( {"train_loss": train_loss, "test_loss": test_loss}, {"test_precision_at_10": test_precision_at_10} ) embeddings = {"movie_id": movie_embeddings} return CFModel(embeddings, train_loss, metrics) ###Output _____no_output_____ ###Markdown Train the Softmax modelWe are now ready to train the softmax model. 
# Create feature embedding columns
def make_embedding_col(key, embedding_dim):
  """Returns an embedding column for the string-valued feature `key`.

  Maps each feature value to an integer id via the vocabulary observed in
  the `movies` DataFrame, then to a dense embedding of size embedding_dim.
  Multi-valued rows are averaged (combiner='mean': bag-of-words embedding).
  """
  categorical_col = tf.feature_column.categorical_column_with_vocabulary_list(
      key=key, vocabulary_list=list(set(movies[key].values)), num_oov_buckets=0)
  return tf.feature_column.embedding_column(
      categorical_column=categorical_col, dimension=embedding_dim,
      # default initializer: truncated normal with stddev=1/sqrt(dimension)
      combiner='mean')
###Code movie_neighbors(softmax_model, "Aladdin", DOT) movie_neighbors(softmax_model, "Aladdin", COSINE) movie_embedding_norm([reg_model, softmax_model]) tsne_movie_embeddings(softmax_model) ###Output _____no_output_____ ###Markdown Copyright 2018 Google LLC. ###Code # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown Recommendation Systems with TensorFlowThis Colab notebook complements the course on [Recommendation Systems](https://developers.google.com/machine-learning/recommendation/). Specifically, we'll be using matrix factorization to learn user and movie embeddings. IntroductionWe will create a movie recommendation system based on the [MovieLens](https://movielens.org/) dataset available [here](http://grouplens.org/datasets/movielens/). The data consists of movies ratings (on a scale of 1 to 5). Outline 1. Exploring the MovieLens Data (10 minutes) 1. Preliminaries (25 minutes) 1. Training a matrix factorization model (15 minutes) 1. Inspecting the Embeddings (15 minutes) 1. Regularization in matrix factorization (15 minutes) 1. Softmax model training (30 minutes) SetupLet's get started by importing the required packages. 
def mask(df, key, function):
  """Returns the rows of `df` for which `function(df[key])` is truthy."""
  selector = function(df[key])
  return df[selector]

def flatten_cols(df):
  """Collapses multi-level column labels into single space-joined strings.

  Mutates `df.columns` in place and returns `df` for chaining.
  """
  joined = []
  for col in df.columns.values:
    joined.append(' '.join(col).strip())
  df.columns = joined
  return df
users_cols = ['user_id', 'age', 'gender', 'occupation', 'zip_code'] users = pd.read_csv( 'ml-100k/u.user', sep='|', names=users_cols, encoding='latin-1') ratings_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp'] ratings = pd.read_csv( 'ml-100k/u.data', sep='\t', names=ratings_cols, encoding='latin-1') # The movies file contains a binary feature for each genre. genre_cols = [ "genre_unknown", "Action", "Adventure", "Animation", "Children", "Comedy", "Crime", "Documentary", "Drama", "Fantasy", "Film-Noir", "Horror", "Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western" ] movies_cols = [ 'movie_id', 'title', 'release_date', "video_release_date", "imdb_url" ] + genre_cols movies = pd.read_csv( 'ml-100k/u.item', sep='|', names=movies_cols, encoding='latin-1') # Since the ids start at 1, we shift them to start at 0. users["user_id"] = users["user_id"].apply(lambda x: str(x-1)) movies["movie_id"] = movies["movie_id"].apply(lambda x: str(x-1)) movies["year"] = movies['release_date'].apply(lambda x: str(x).split('-')[-1]) ratings["movie_id"] = ratings["movie_id"].apply(lambda x: str(x-1)) ratings["user_id"] = ratings["user_id"].apply(lambda x: str(x-1)) ratings["rating"] = ratings["rating"].apply(lambda x: float(x)) # Compute the number of movies to which a genre is assigned. genre_occurences = movies[genre_cols].sum().to_dict() # Since some movies can belong to more than one genre, we create different # 'genre' columns as follows: # - all_genres: all the active genres of the movie. # - genre: randomly sampled from the active genres. 
def split_dataframe(df, holdout_fraction=0.1):
  """Randomly partitions a DataFrame into disjoint training and test sets.

  Args:
    df: a dataframe.
    holdout_fraction: fraction of dataframe rows to place in the test set.

  Returns:
    train: dataframe for training.
    test: dataframe for testing.
  """
  # Sample the holdout rows without replacement, then keep the complement.
  holdout = df.sample(frac=holdout_fraction, replace=False)
  keep = ~df.index.isin(holdout.index)
  return df[keep], holdout
def filtered_hist(field, label, filter):
  """Creates a layered chart of histograms.

  One layer shows the histogram of the data selected by `filter`, overlaid
  with a light-gray histogram of the full data for comparison.

  Args:
    field: the field for which to generate the histogram.
    label: String label of the histogram.
    filter: an alt.Selection object to be used to filter the data.
  """
  hist = alt.Chart().mark_bar().encode(
      x=alt.X(field, bin=alt.Bin(maxbins=10), title=label),
      y="count()",
  ).properties(
      width=300,
  )
  selected_layer = hist.transform_filter(filter)
  background_layer = hist.encode(
      color=alt.value('lightgray'), opacity=alt.value(.7))
  return alt.layer(selected_layer, background_layer).resolve_scale(
      y='independent')
alt.hconcat( filtered_hist('rating count', '# ratings / user', occupation_filter), filtered_hist('rating mean', 'mean user rating', occupation_filter), occupation_chart, data=users_ratings) ###Output _____no_output_____ ###Markdown MoviesIt is also useful to look at information about the movies and their ratings. ###Code movies_ratings = movies.merge( ratings .groupby('movie_id', as_index=False) .agg({'rating': ['count', 'mean']}) .flatten_cols(), on='movie_id') genre_filter = alt.selection_multi(fields=['genre']) genre_chart = alt.Chart().mark_bar().encode( x="count()", y=alt.Y('genre'), color=alt.condition( genre_filter, alt.Color("genre:N"), alt.value('lightgray')) ).properties(height=300, selection=genre_filter) (movies_ratings[['title', 'rating count', 'rating mean']] .sort_values('rating count', ascending=False) .head(10)) (movies_ratings[['title', 'rating count', 'rating mean']] .mask('rating count', lambda x: x > 20) .sort_values('rating mean', ascending=False) .head(10)) ###Output _____no_output_____ ###Markdown Finally, the last chart shows the distribution of the number of ratings and average rating. ###Code # Display the number of ratings and average rating per movie. alt.hconcat( filtered_hist('rating count', '# ratings / movie', genre_filter), filtered_hist('rating mean', 'mean movie rating', genre_filter), genre_chart, data=movies_ratings) ###Output _____no_output_____ ###Markdown II. 
For efficient representation, we will use a [tf.SparseTensor](https://www.tensorflow.org/api_docs/python/tf/SparseTensor).
def build_rating_sparse_tensor(ratings_df):
  """Builds a tf.SparseTensor from the observed (user, movie, rating) triples.

  Args:
    ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns.

  Returns:
    a tf.SparseTensor representing the ratings matrix, with dense shape
    [num_users, num_movies] taken from the module-level DataFrames.
  """
  # Each observed rating becomes one (row, col) index paired with its value.
  observed_pairs = ratings_df[['user_id', 'movie_id']].values
  observed_values = ratings_df['rating'].values
  return tf.SparseTensor(
      indices=observed_pairs,
      values=observed_values,
      dense_shape=[users.shape[0], movies.shape[0]])
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
  """Mean squared error over the observed entries of the ratings matrix.

  Args:
    sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
    user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
      dimension, such that U_i is the embedding of user i.
    movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
      dimension, such that V_j is the embedding of movie j.

  Returns:
    A scalar Tensor representing the MSE between the true ratings and the
      model's predictions.
  """
  # Score every (user, movie) pair with U V^T, then keep only the entries
  # that were actually observed before computing the error.
  full_predictions = tf.matmul(
      user_embeddings, movie_embeddings, transpose_b=True)
  observed_predictions = tf.gather_nd(full_predictions, sparse_ratings.indices)
  return tf.losses.mean_squared_error(
      sparse_ratings.values, observed_predictions)
""" predictions = tf.reduce_sum( tf.gather(user_embeddings, sparse_ratings.indices[:, 0]) * tf.gather(movie_embeddings, sparse_ratings.indices[:, 1]), axis=1) loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions) return loss ###Output _____no_output_____ ###Markdown Exercise 3 (Optional): adding your own ratings to the data set You have the option to add your own ratings to the data set. If you choose to do so, you will be able to see recommendations for yourself.Start by checking the box below. Running the next cell will authenticate you to your google Drive account, and create a spreadsheet, that contains all movie titles in column 'A'. Follow the link to the spreadsheet and take 3 minutes to rate some of the movies. Your ratings should be entered in column 'B'. ###Code USER_RATINGS = True #@param {type:"boolean"} # @title Run to create a spreadsheet, then use it to enter your ratings. # Authenticate user. if USER_RATINGS: auth.authenticate_user() gc = gspread.authorize(GoogleCredentials.get_application_default()) # Create the spreadsheet and print a link to it. try: sh = gc.open('MovieLens-test') except(gspread.SpreadsheetNotFound): sh = gc.create('MovieLens-test') worksheet = sh.sheet1 titles = movies['title'].values cell_list = worksheet.range(1, 1, len(titles), 1) for cell, title in zip(cell_list, titles): cell.value = title worksheet.update_cells(cell_list) print("Link to the spreadsheet: " "https://docs.google.com/spreadsheets/d/{}/edit".format(sh.id)) ###Output _____no_output_____ ###Markdown Run the next cell to load your ratings and add them to the main `ratings` DataFrame. ###Code # @title Run to load your ratings. # Load the ratings from the spreadsheet and create a DataFrame. 
if USER_RATINGS: my_ratings = pd.DataFrame.from_records(worksheet.get_all_values()).reset_index() my_ratings = my_ratings[my_ratings[1] != ''] my_ratings = pd.DataFrame({ 'user_id': "943", 'movie_id': list(map(str, my_ratings['index'])), 'rating': list(map(float, my_ratings[1])), }) # Remove previous ratings. ratings = ratings[ratings.user_id != "943"] # Add new ratings. ratings = ratings.append(my_ratings, ignore_index=True) # Add new user to the users DataFrame. if users.shape[0] == 943: users = users.append(users.iloc[942], ignore_index=True) users["user_id"][943] = "943" print("Added your %d ratings; you have great taste!" % len(my_ratings)) ratings[ratings.user_id=="943"].merge(movies[['movie_id', 'title']]) ###Output _____no_output_____ ###Markdown III. Training a Matrix Factorization model CFModel (Collaborative Filtering Model) helper classThis is a simple class to train a matrix factorization model using stochastic gradient descent.The class constructor takes- the user embeddings U (a `tf.Variable`).- the movie embeddings V, (a `tf.Variable`).- a loss to optimize (a `tf.Tensor`).- an optional list of metrics dictionaries, each mapping a string (the name of the metric) to a tensor. These are evaluated and plotted during training (e.g. training error and test error).After training, one can access the trained embeddings using the `model.embeddings` dictionary.Example usage:```U_var = ...V_var = ...loss = ...model = CFModel(U_var, V_var, loss)model.train(iterations=100, learning_rate=1.0)user_embeddings = model.embeddings['user_id']movie_embeddings = model.embeddings['movie_id']``` ###Code # @title CFModel helper class (run this cell) class CFModel(object): """Simple class that represents a collaborative filtering model""" def __init__(self, embedding_vars, loss, metrics=None): """Initializes a CFModel. Args: embedding_vars: A dictionary of tf.Variables. loss: A float Tensor. The loss to optimize. metrics: optional list of dictionaries of Tensors. 
The metrics in each dictionary will be plotted in a separate figure during training. """ self._embedding_vars = embedding_vars self._loss = loss self._metrics = metrics self._embeddings = {k: None for k in embedding_vars} self._session = None @property def embeddings(self): """The embeddings dictionary.""" return self._embeddings def train(self, num_iterations=100, learning_rate=1.0, plot_results=True, optimizer=tf.train.GradientDescentOptimizer): """Trains the model. Args: iterations: number of iterations to run. learning_rate: optimizer learning rate. plot_results: whether to plot the results at the end of training. optimizer: the optimizer to use. Default to GradientDescentOptimizer. Returns: The metrics dictionary evaluated at the last iteration. """ with self._loss.graph.as_default(): opt = optimizer(learning_rate) train_op = opt.minimize(self._loss) local_init_op = tf.group( tf.variables_initializer(opt.variables()), tf.local_variables_initializer()) if self._session is None: self._session = tf.Session() with self._session.as_default(): self._session.run(tf.global_variables_initializer()) self._session.run(tf.tables_initializer()) tf.train.start_queue_runners() with self._session.as_default(): local_init_op.run() iterations = [] metrics = self._metrics or ({},) metrics_vals = [collections.defaultdict(list) for _ in self._metrics] # Train and append results. for i in range(num_iterations + 1): _, results = self._session.run((train_op, metrics)) if (i % 10 == 0) or i == num_iterations: print("\r iteration %d: " % i + ", ".join( ["%s=%f" % (k, v) for r in results for k, v in r.items()]), end='') iterations.append(i) for metric_val, result in zip(metrics_vals, results): for k, v in result.items(): metric_val[k].append(v) for k, v in self._embedding_vars.items(): self._embeddings[k] = v.eval() if plot_results: # Plot the metrics. 
def build_model(ratings, embedding_dim=3, init_stddev=1.):
  """Builds a matrix-factorization CFModel with train/test MSE metrics.

  Args:
    ratings: a DataFrame of the ratings
    embedding_dim: the dimension of the embedding vectors.
    init_stddev: float, the standard deviation of the random initial
      embeddings.

  Returns:
    model: a CFModel.
  """
  # Hold out a fraction of the ratings for evaluation.
  train_ratings, test_ratings = split_dataframe(ratings)
  # Sparse [num_users, num_movies] representations of each split.
  train_sparse = build_rating_sparse_tensor(train_ratings)
  test_sparse = build_rating_sparse_tensor(test_ratings)
  # Random-normal initial embeddings for users (U) and movies (V).
  U = tf.Variable(tf.random_normal(
      [train_sparse.dense_shape[0], embedding_dim], stddev=init_stddev))
  V = tf.Variable(tf.random_normal(
      [train_sparse.dense_shape[1], embedding_dim], stddev=init_stddev))
  # Only the training loss is optimized; the test loss is tracked as a metric.
  train_loss = sparse_mean_square_error(train_sparse, U, V)
  test_loss = sparse_mean_square_error(test_sparse, U, V)
  metrics = {
      'train_error': train_loss,
      'test_error': test_loss
  }
  embeddings = {
      "user_id": U,
      "movie_id": V
  }
  return CFModel(embeddings, train_loss, [metrics])
DOT = 'dot'
COSINE = 'cosine'

def compute_scores(query_embedding, item_embeddings, measure=DOT):
  """Computes the scores of the candidates given a query.

  Args:
    query_embedding: a vector of shape [k], representing the query embedding.
    item_embeddings: a matrix of shape [N, k], such that row i is the embedding
      of item i.
    measure: a string specifying the similarity measure to be used. Can be
      either DOT or COSINE.

  Returns:
    scores: a vector of shape [N], such that scores[i] is the score of item i.
  """
  query = query_embedding
  items = item_embeddings
  if measure == COSINE:
    # Normalizing both sides turns the dot product into cosine similarity.
    items = items / np.linalg.norm(items, axis=1, keepdims=True)
    query = query / np.linalg.norm(query)
  return query.dot(items.T)
Let's look at the nearest neighbors for some of the movies.
""" if not isinstance(models, list): models = [models] df = pd.DataFrame({ 'title': movies['title'], 'genre': movies['genre'], 'num_ratings': movies_ratings['rating count'], }) charts = [] brush = alt.selection_interval() for i, model in enumerate(models): norm_key = 'norm'+str(i) df[norm_key] = np.linalg.norm(model.embeddings["movie_id"], axis=1) nearest = alt.selection( type='single', encodings=['x', 'y'], on='mouseover', nearest=True, empty='none') base = alt.Chart().mark_circle().encode( x='num_ratings', y=norm_key, color=alt.condition(brush, alt.value('#4c78a8'), alt.value('lightgray')) ).properties( selection=nearest).add_selection(brush) text = alt.Chart().mark_text(align='center', dx=5, dy=-5).encode( x='num_ratings', y=norm_key, text=alt.condition(nearest, 'title', alt.value(''))) charts.append(alt.layer(base, text)) return alt.hconcat(*charts, data=df) def visualize_movie_embeddings(data, x, y): nearest = alt.selection( type='single', encodings=['x', 'y'], on='mouseover', nearest=True, empty='none') base = alt.Chart().mark_circle().encode( x=x, y=y, color=alt.condition(genre_filter, "genre", alt.value("whitesmoke")), ).properties( width=600, height=600, selection=nearest) text = alt.Chart().mark_text(align='left', dx=5, dy=-5).encode( x=x, y=y, text=alt.condition(nearest, 'title', alt.value(''))) return alt.hconcat(alt.layer(base, text), genre_chart, data=data) def tsne_movie_embeddings(model): """Visualizes the movie embeddings, projected using t-SNE with Cosine measure. Args: model: A MFModel object. 
""" tsne = sklearn.manifold.TSNE( n_components=2, perplexity=40, metric='cosine', early_exaggeration=10.0, init='pca', verbose=True, n_iter=400) print('Running t-SNE...') V_proj = tsne.fit_transform(model.embeddings["movie_id"]) movies.loc[:,'x'] = V_proj[:, 0] movies.loc[:,'y'] = V_proj[:, 1] return visualize_movie_embeddings(movies, 'x', 'y') movie_embedding_norm(model) ###Output _____no_output_____ ###Markdown Note: Depending on how the model is initialized, you may observe that some niche movies (ones with few ratings) have a high norm, leading to spurious recommendations. This can happen if the embedding of that movie happens to be initialized with a high norm. Then, because the movie has few ratings, it is infrequently updated, and can keep its high norm. This will be alleviated by using regularization.Try changing the value of the hyper-parameter `init_stddev`. One quantity that can be helpful is that the expected norm of a $d$-dimensional vector with entries $\sim \mathcal N(0, \sigma^2)$ is approximatley $\sigma \sqrt d$.How does this affect the embedding norm distribution, and the ranking of the top-norm movies? ###Code #@title Solution model_lowinit = build_model(ratings, embedding_dim=30, init_stddev=0.05) model_lowinit.train(num_iterations=1000, learning_rate=10.) movie_neighbors(model_lowinit, "Aladdin", DOT) movie_neighbors(model_lowinit, "Aladdin", COSINE) movie_embedding_norm([model, model_lowinit]) ###Output _____no_output_____ ###Markdown Embedding visualizationSince it is hard to visualize embeddings in a higher-dimensional space (when the embedding dimension $k > 3$), one approach is to project the embeddings to a lower dimensional space. T-SNE (T-distributed Stochastic Neighbor Embedding) is an algorithm that projects the embeddings while attempting to preserve their pariwise distances. It can be useful for visualization, but one should use it with care. 
For more information on using t-SNE, see [How to Use t-SNE Effectively](https://distill.pub/2016/misread-tsne/). ###Code tsne_movie_embeddings(model_lowinit) ###Output _____no_output_____ ###Markdown You can highlight the embeddings of a given genre by clicking on the genres panel (SHIFT+click to select multiple genres).We can observe that the embeddings do not seem to have any notable structure, and the embeddings of a given genre are located all over the embedding space. This confirms the poor quality of the learned embeddings. One of the main reasons, which we will address in the next section, is that we only trained the model on observed pairs, and without regularization. V. Regularization In Matrix FactorizationIn the previous section, our loss was defined as the mean squared error on the observed part of the rating matrix. As discussed in the lecture, this can be problematic as the model does not learn how to place the embeddings of irrelevant movies. This phenomenon is known as *folding*.We will add regularization terms that will address this issue. We will use two types of regularization:- Regularization of the model parameters. This is a common $\ell_2$ regularization term on the embedding matrices, given by $r(U, V) = \frac{1}{N} \sum_i \|U_i\|^2 + \frac{1}{M}\sum_j \|V_j\|^2$.- A global prior that pushes the prediction of any pair towards zero, called the *gravity* term. This is given by $g(U, V) = \frac{1}{MN} \sum_{i = 1}^N \sum_{j = 1}^M \langle U_i, V_j \rangle^2$.The total loss is then given by$$\frac{1}{|\Omega|}\sum_{(i, j) \in \Omega} (A_{ij} - \langle U_i, V_j\rangle)^2 + \lambda _r r(U, V) + \lambda_g g(U, V)$$where $\lambda_r$ and $\lambda_g$ are two regularization coefficients (hyper-parameters). Exercise 6: Build a regularized Matrix Factorization model and train itWrite a function that builds a regularized model. You are given a function `gravity(U, V)` that computes the gravity term given the two embedding matrices $U$ and $V$. 
###Code def gravity(U, V): """Creates a gravity loss given two embedding matrices.""" return 1. / (U.shape[0].value*V.shape[0].value) * tf.reduce_sum( tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True)) def build_regularized_model( ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1., init_stddev=0.1): """ Args: ratings: the DataFrame of movie ratings. embedding_dim: The dimension of the embedding space. regularization_coeff: The regularization coefficient lambda. gravity_coeff: The gravity regularization coefficient lambda_g. Returns: A CFModel object that uses a regularized loss. """ # Split the ratings DataFrame into train and test. train_ratings, test_ratings = split_dataframe(ratings) # SparseTensor representation of the train and test datasets. A_train = build_rating_sparse_tensor(train_ratings) A_test = build_rating_sparse_tensor(test_ratings) U = tf.Variable(tf.random_normal( [A_train.dense_shape[0], embedding_dim], stddev=init_stddev)) V = tf.Variable(tf.random_normal( [A_train.dense_shape[1], embedding_dim], stddev=init_stddev)) # ========================= Complete this section ============================ # error_train = # error_test = # gravity_loss = # regularization_loss = # ============================================================================ total_loss = error_train + regularization_loss + gravity_loss losses = { 'train_error': error_train, 'test_error': error_test, } loss_components = { 'observed_loss': error_train, 'regularization_loss': regularization_loss, 'gravity_loss': gravity_loss, } embeddings = {"user_id": U, "movie_id": V} return CFModel(embeddings, total_loss, [losses, loss_components]) # @title Solution def gravity(U, V): """Creates a gravity loss given two embedding matrices.""" return 1. 
/ (U.shape[0].value*V.shape[0].value) * tf.reduce_sum( tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True)) def build_regularized_model( ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1., init_stddev=0.1): """ Args: ratings: the DataFrame of movie ratings. embedding_dim: The dimension of the embedding space. regularization_coeff: The regularization coefficient lambda. gravity_coeff: The gravity regularization coefficient lambda_g. Returns: A CFModel object that uses a regularized loss. """ # Split the ratings DataFrame into train and test. train_ratings, test_ratings = split_dataframe(ratings) # SparseTensor representation of the train and test datasets. A_train = build_rating_sparse_tensor(train_ratings) A_test = build_rating_sparse_tensor(test_ratings) U = tf.Variable(tf.random_normal( [A_train.dense_shape[0], embedding_dim], stddev=init_stddev)) V = tf.Variable(tf.random_normal( [A_train.dense_shape[1], embedding_dim], stddev=init_stddev)) error_train = sparse_mean_square_error(A_train, U, V) error_test = sparse_mean_square_error(A_test, U, V) gravity_loss = gravity_coeff * gravity(U, V) regularization_loss = regularization_coeff * ( tf.reduce_sum(U*U)/U.shape[0].value + tf.reduce_sum(V*V)/V.shape[0].value) total_loss = error_train + regularization_loss + gravity_loss losses = { 'train_error_observed': error_train, 'test_error_observed': error_test, } loss_components = { 'observed_loss': error_train, 'regularization_loss': regularization_loss, 'gravity_loss': gravity_loss, } embeddings = {"user_id": U, "movie_id": V} return CFModel(embeddings, total_loss, [losses, loss_components]) ###Output _____no_output_____ ###Markdown It is now time to train the regularized model! You can try different values of the regularization coefficients, and different embedding dimensions. 
###Code reg_model = build_regularized_model( ratings, regularization_coeff=0.1, gravity_coeff=1.0, embedding_dim=35, init_stddev=.05) reg_model.train(num_iterations=2000, learning_rate=20.) ###Output _____no_output_____ ###Markdown Observe that adding the regularization terms results in a higher MSE, both on the training and test set. However, as we will see, the quality of the recommendations improves. This highlights a tension between fitting the observed data and minimizing the regularization terms. Fitting the observed data often emphasizes learning high similarity (between items with many interactions), but a good embedding representation also requires learning low similarity (between items with few or no interactions). Inspect the resultsLet's see if the results with regularization look better. ###Code user_recommendations(reg_model, DOT, exclude_rated=True, k=10) ###Output _____no_output_____ ###Markdown Hopefully, these recommendations look better. You can change the similarity measure from COSINE to DOT and observe how this affects the recommendations.Since the model is likely to recommend items that you rated highly, you have the option to exclude the items you rated, using `exclude_rated=True`.In the following cells, we display the nearest neighbors, the embedding norms, and the t-SNE projection of the movie embeddings. ###Code movie_neighbors(reg_model, "Aladdin", DOT) movie_neighbors(reg_model, "Aladdin", COSINE) ###Output _____no_output_____ ###Markdown Copyright 2018 Google LLC. ###Code # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown Recommendation Systems with TensorFlowThis Colab notebook complements the course on [Recommendation Systems](https://developers.google.com/machine-learning/recommendation/). Specifically, we'll be using matrix factorization to learn user and movie embeddings. IntroductionWe will create a movie recommendation system based on the [MovieLens](https://movielens.org/) dataset available [here](http://grouplens.org/datasets/movielens/). The data consists of movies ratings (on a scale of 1 to 5). Outline 1. Exploring the MovieLens Data (10 minutes) 1. Preliminaries (25 minutes) 1. Training a matrix factorization model (15 minutes) 1. Inspecting the Embeddings (15 minutes) 1. Regularization in matrix factorization (15 minutes) 1. Softmax model training (30 minutes) SetupLet's get started by importing the required packages. ###Code # @title Imports (run this cell) from __future__ import print_function import numpy as np import pandas as pd import collections from mpl_toolkits.mplot3d import Axes3D from IPython import display from matplotlib import pyplot as plt import sklearn import sklearn.manifold import tensorflow.compat.v1 as tf tf.disable_v2_behavior() tf.logging.set_verbosity(tf.logging.ERROR) # Add some convenience functions to Pandas DataFrame. pd.options.display.max_rows = 10 pd.options.display.float_format = '{:.3f}'.format def mask(df, key, function): """Returns a filtered dataframe, by applying function to key""" return df[function(df[key])] def flatten_cols(df): df.columns = [' '.join(col).strip() for col in df.columns.values] return df pd.DataFrame.mask = mask pd.DataFrame.flatten_cols = flatten_cols # Install Altair and activate its colab renderer. 
print("Installing Altair...") !pip install git+git://github.com/altair-viz/altair.git import altair as alt alt.data_transformers.enable('default', max_rows=None) alt.renderers.enable('colab') print("Done installing Altair.") # Install spreadsheets and import authentication module. USER_RATINGS = False !pip install --upgrade -q gspread from google.colab import auth import gspread from oauth2client.client import GoogleCredentials ###Output _____no_output_____ ###Markdown We then download the MovieLens Data, and create DataFrames containing movies, users, and ratings. ###Code # @title Load the MovieLens data (run this cell). # Download MovieLens data. print("Downloading movielens data...") from urllib.request import urlretrieve import zipfile urlretrieve("http://files.grouplens.org/datasets/movielens/ml-100k.zip", "movielens.zip") zip_ref = zipfile.ZipFile('movielens.zip', "r") zip_ref.extractall() print("Done. Dataset contains:") print(zip_ref.read('ml-100k/u.info')) # Load each data set (users, movies, and ratings). users_cols = ['user_id', 'age', 'sex', 'occupation', 'zip_code'] users = pd.read_csv( 'ml-100k/u.user', sep='|', names=users_cols, encoding='latin-1') ratings_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp'] ratings = pd.read_csv( 'ml-100k/u.data', sep='\t', names=ratings_cols, encoding='latin-1') # The movies file contains a binary feature for each genre. genre_cols = [ "genre_unknown", "Action", "Adventure", "Animation", "Children", "Comedy", "Crime", "Documentary", "Drama", "Fantasy", "Film-Noir", "Horror", "Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western" ] movies_cols = [ 'movie_id', 'title', 'release_date', "video_release_date", "imdb_url" ] + genre_cols movies = pd.read_csv( 'ml-100k/u.item', sep='|', names=movies_cols, encoding='latin-1') # Since the ids start at 1, we shift them to start at 0. 
users["user_id"] = users["user_id"].apply(lambda x: str(x-1)) movies["movie_id"] = movies["movie_id"].apply(lambda x: str(x-1)) movies["year"] = movies['release_date'].apply(lambda x: str(x).split('-')[-1]) ratings["movie_id"] = ratings["movie_id"].apply(lambda x: str(x-1)) ratings["user_id"] = ratings["user_id"].apply(lambda x: str(x-1)) ratings["rating"] = ratings["rating"].apply(lambda x: float(x)) # Compute the number of movies to which a genre is assigned. genre_occurences = movies[genre_cols].sum().to_dict() # Since some movies can belong to more than one genre, we create different # 'genre' columns as follows: # - all_genres: all the active genres of the movie. # - genre: randomly sampled from the active genres. def mark_genres(movies, genres): def get_random_genre(gs): active = [genre for genre, g in zip(genres, gs) if g==1] if len(active) == 0: return 'Other' return np.random.choice(active) def get_all_genres(gs): active = [genre for genre, g in zip(genres, gs) if g==1] if len(active) == 0: return 'Other' return '-'.join(active) movies['genre'] = [ get_random_genre(gs) for gs in zip(*[movies[genre] for genre in genres])] movies['all_genres'] = [ get_all_genres(gs) for gs in zip(*[movies[genre] for genre in genres])] mark_genres(movies, genre_cols) # Create one merged DataFrame containing all the movielens data. movielens = ratings.merge(movies, on='movie_id').merge(users, on='user_id') # Utility to split the data into training and test sets. def split_dataframe(df, holdout_fraction=0.1): """Splits a DataFrame into training and test sets. Args: df: a dataframe. holdout_fraction: fraction of dataframe rows to use in the test set. Returns: train: dataframe for training test: dataframe for testing """ test = df.sample(frac=holdout_fraction, replace=False) train = df[~df.index.isin(test.index)] return train, test ###Output _____no_output_____ ###Markdown I. Exploring the Movielens DataBefore we dive into model building, let's inspect our MovieLens dataset. 
It is usually helpful to understand the statistics of the dataset. UsersWe start by printing some basic statistics describing the numeric user features. ###Code users.describe() ###Output _____no_output_____ ###Markdown We can also print some basic statistics describing the categorical user features ###Code users.describe(include=[np.object]) ###Output _____no_output_____ ###Markdown We can also create histograms to further understand the distribution of the users. We use Altair to create an interactive chart. ###Code # @title Altair visualization code (run this cell) # The following functions are used to generate interactive Altair charts. # We will display histograms of the data, sliced by a given attribute. # Create filters to be used to slice the data. occupation_filter = alt.selection_multi(fields=["occupation"]) occupation_chart = alt.Chart().mark_bar().encode( x="count()", y=alt.Y("occupation:N"), color=alt.condition( occupation_filter, alt.Color("occupation:N", scale=alt.Scale(scheme='category20')), alt.value("lightgray")), ).properties(width=300, height=300, selection=occupation_filter) # A function that generates a histogram of filtered data. def filtered_hist(field, label, filter): """Creates a layered chart of histograms. The first layer (light gray) contains the histogram of the full data, and the second contains the histogram of the filtered data. Args: field: the field for which to generate the histogram. label: String label of the histogram. filter: an alt.Selection object to be used to filter the data. """ base = alt.Chart().mark_bar().encode( x=alt.X(field, bin=alt.Bin(maxbins=10), title=label), y="count()", ).properties( width=300, ) return alt.layer( base.transform_filter(filter), base.encode(color=alt.value('lightgray'), opacity=alt.value(.7)), ).resolve_scale(y='independent') ###Output _____no_output_____ ###Markdown Next, we look at the distribution of ratings per user. 
Clicking on an occupation in the right chart will filter the data by that occupation. The corresponding histogram is shown in blue, and superimposed with the histogram for the whole data (in light gray). You can use SHIFT+click to select multiple subsets.What do you observe, and how might this affect the recommendations? ###Code users_ratings = ( ratings .groupby('user_id', as_index=False) .agg({'rating': ['count', 'mean']}) .flatten_cols() .merge(users, on='user_id') ) # Create a chart for the count, and one for the mean. alt.hconcat( filtered_hist('rating count', '# ratings / user', occupation_filter), filtered_hist('rating mean', 'mean user rating', occupation_filter), occupation_chart, data=users_ratings) ###Output _____no_output_____ ###Markdown MoviesIt is also useful to look at information about the movies and their ratings. ###Code movies_ratings = movies.merge( ratings .groupby('movie_id', as_index=False) .agg({'rating': ['count', 'mean']}) .flatten_cols(), on='movie_id') genre_filter = alt.selection_multi(fields=['genre']) genre_chart = alt.Chart().mark_bar().encode( x="count()", y=alt.Y('genre'), color=alt.condition( genre_filter, alt.Color("genre:N"), alt.value('lightgray')) ).properties(height=300, selection=genre_filter) (movies_ratings[['title', 'rating count', 'rating mean']] .sort_values('rating count', ascending=False) .head(10)) (movies_ratings[['title', 'rating count', 'rating mean']] .mask('rating count', lambda x: x > 20) .sort_values('rating mean', ascending=False) .head(10)) ###Output _____no_output_____ ###Markdown Finally, the last chart shows the distribution of the number of ratings and average rating. ###Code # Display the number of ratings and average rating per movie. alt.hconcat( filtered_hist('rating count', '# ratings / movie', genre_filter), filtered_hist('rating mean', 'mean movie rating', genre_filter), genre_chart, data=movies_ratings) ###Output _____no_output_____ ###Markdown II. 
PreliminariesOur goal is to factorize the ratings matrix $A$ into the product of a user embedding matrix $U$ and movie embedding matrix $V$, such that $A \approx UV^\top$ with$U = \begin{bmatrix} u_{1} \\ \hline \vdots \\ \hline u_{N} \end{bmatrix}$ and$V = \begin{bmatrix} v_{1} \\ \hline \vdots \\ \hline v_{M} \end{bmatrix}$.Here- $N$ is the number of users,- $M$ is the number of movies,- $A_{ij}$ is the rating of the $j$th movies by the $i$th user,- each row $U_i$ is a $d$-dimensional vector (embedding) representing user $i$,- each rwo $V_j$ is a $d$-dimensional vector (embedding) representing movie $j$,- the prediction of the model for the $(i, j)$ pair is the dot product $\langle U_i, V_j \rangle$. Sparse Representation of the Rating MatrixThe rating matrix could be very large and, in general, most of the entries are unobserved, since a given user will only rate a small subset of movies. For effcient representation, we will use a [tf.SparseTensor](https://www.tensorflow.org/api_docs/python/tf/SparseTensor). A `SparseTensor` uses three tensors to represent the matrix: `tf.SparseTensor(indices, values, dense_shape)` represents a tensor, where a value $A_{ij} = a$ is encoded by setting `indices[k] = [i, j]` and `values[k] = a`. The last tensor `dense_shape` is used to specify the shape of the full underlying matrix. Toy exampleAssume we have $2$ users and $4$ movies. 
Our toy ratings dataframe has three ratings,user\_id | movie\_id | rating--:|--:|--:0 | 0 | 5.00 | 1 | 3.01 | 3 | 1.0The corresponding rating matrix is$$A =\begin{bmatrix}5.0 & 3.0 & 0 & 0 \\0 & 0 & 0 & 1.0\end{bmatrix}$$And the SparseTensor representation is,```pythonSparseTensor( indices=[[0, 0], [0, 1], [1,3]], values=[5.0, 3.0, 1.0], dense_shape=[2, 4])``` Exercise 1: Build a tf.SparseTensor representation of the Rating Matrix.In this exercise, we'll write a function that maps from our `ratings` DataFrame to a `tf.SparseTensor`.Hint: you can select the values of a given column of a Dataframe `df` using `df['column_name'].values`. ###Code def build_rating_sparse_tensor(ratings_df): """ Args: ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns. Returns: A tf.SparseTensor representing the ratings matrix. """ # ========================= Complete this section ============================ # indices = # values = # ============================================================================ return tf.SparseTensor( indices=indices, values=values, dense_shape=[users.shape[0], movies.shape[0]]) #@title Solution def build_rating_sparse_tensor(ratings_df): """ Args: ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns. Returns: a tf.SparseTensor representing the ratings matrix. """ indices = ratings_df[['user_id', 'movie_id']].values values = ratings_df['rating'].values return tf.SparseTensor( indices=indices, values=values, dense_shape=[users.shape[0], movies.shape[0]]) ###Output _____no_output_____ ###Markdown Calculating the errorThe model approximates the ratings matrix $A$ by a low-rank product $UV^\top$. We need a way to measure the approximation error. We'll start by using the Mean Squared Error of observed entries only (we will revisit this later). 
It is defined as$$\begin{align*}\text{MSE}(A, UV^\top)&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - (UV^\top)_{ij})^2} \\&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - \langle U_i, V_j\rangle)^2}\end{align*}$$where $\Omega$ is the set of observed ratings, and $|\Omega|$ is the cardinality of $\Omega$. Exercise 2: Mean Squared ErrorWrite a TensorFlow function that takes a sparse rating matrix $A$ and the two embedding matrices $U, V$ and returns the mean squared error $\text{MSE}(A, UV^\top)$.Hints: * in this section, we only consider observed entries when calculating the loss. * a `SparseTensor` `sp_x` is a tuple of three Tensors: `sp_x.indices`, `sp_x.values` and `sp_x.dense_shape`. * you may find [`tf.gather_nd`](https://www.tensorflow.org/api_docs/python/tf/gather_nd) and [`tf.losses.mean_squared_error`](https://www.tensorflow.org/api_docs/python/tf/losses/mean_squared_error) helpful. ###Code def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings): """ Args: sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M] user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding dimension, such that U_i is the embedding of user i. movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding dimension, such that V_j is the embedding of movie j. Returns: A scalar Tensor representing the MSE between the true ratings and the model's predictions. """ # ========================= Complete this section ============================ # loss = # ============================================================================ return loss #@title Solution def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings): """ Args: sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M] user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding dimension, such that U_i is the embedding of user i. 
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding dimension, such that V_j is the embedding of movie j. Returns: A scalar Tensor representing the MSE between the true ratings and the model's predictions. """ predictions = tf.gather_nd( tf.matmul(user_embeddings, movie_embeddings, transpose_b=True), sparse_ratings.indices) loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions) return loss ###Output _____no_output_____ ###Markdown Note: One approach is to compute the full prediction matrix $UV^\top$, then gather the entries corresponding to the observed pairs. The memory cost of this approach is $O(NM)$. For the MovieLens dataset, this is fine, as the dense $N \times M$ matrix is small enough to fit in memory ($N = 943$, $M = 1682$).Another approach (given in the alternate solution below) is to only gather the embeddings of the observed pairs, then compute their dot products. The memory cost is $O(|\Omega| d)$ where $d$ is the embedding dimension. In our case, $|\Omega| = 10^5$, and the embedding dimension is on the order of $10$, so the memory cost of both methods is comparable. But when the number of users or movies is much larger, the first approach becomes infeasible. ###Code #@title Alternate Solution def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings): """ Args: sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M] user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding dimension, such that U_i is the embedding of user i. movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding dimension, such that V_j is the embedding of movie j. Returns: A scalar Tensor representing the MSE between the true ratings and the model's predictions. 
""" predictions = tf.reduce_sum( tf.gather(user_embeddings, sparse_ratings.indices[:, 0]) * tf.gather(movie_embeddings, sparse_ratings.indices[:, 1]), axis=1) loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions) return loss ###Output _____no_output_____ ###Markdown Exercise 3 (Optional): adding your own ratings to the data set You have the option to add your own ratings to the data set. If you choose to do so, you will be able to see recommendations for yourself.Start by checking the box below. Running the next cell will authenticate you to your google Drive account, and create a spreadsheet, that contains all movie titles in column 'A'. Follow the link to the spreadsheet and take 3 minutes to rate some of the movies. Your ratings should be entered in column 'B'. ###Code USER_RATINGS = True #@param {type:"boolean"} # @title Run to create a spreadsheet, then use it to enter your ratings. # Authenticate user. if USER_RATINGS: auth.authenticate_user() gc = gspread.authorize(GoogleCredentials.get_application_default()) # Create the spreadsheet and print a link to it. try: sh = gc.open('MovieLens-test') except(gspread.SpreadsheetNotFound): sh = gc.create('MovieLens-test') worksheet = sh.sheet1 titles = movies['title'].values cell_list = worksheet.range(1, 1, len(titles), 1) for cell, title in zip(cell_list, titles): cell.value = title worksheet.update_cells(cell_list) print("Link to the spreadsheet: " "https://docs.google.com/spreadsheets/d/{}/edit".format(sh.id)) ###Output _____no_output_____ ###Markdown Run the next cell to load your ratings and add them to the main `ratings` DataFrame. ###Code # @title Run to load your ratings. # Load the ratings from the spreadsheet and create a DataFrame. 
# Merge the user's own ratings (entered in a Google Sheet) into the MovieLens
# data as synthetic user "943", so recommendations can be computed for them.
if USER_RATINGS:
  my_ratings = pd.DataFrame.from_records(worksheet.get_all_values()).reset_index()
  my_ratings = my_ratings[my_ratings[1] != '']
  my_ratings = pd.DataFrame({
      'user_id': "943",
      'movie_id': list(map(str, my_ratings['index'])),
      'rating': list(map(float, my_ratings[1])),
  })
  # Remove previous ratings.
  ratings = ratings[ratings.user_id != "943"]
  # Add new ratings.
  # NOTE(review): DataFrame.append was removed in pandas 2.0; on a modern
  # pandas this needs pd.concat -- confirm the target pandas version.
  ratings = ratings.append(my_ratings, ignore_index=True)
  # Add new user to the users DataFrame.
  if users.shape[0] == 943:
    users = users.append(users.iloc[942], ignore_index=True)
    users["user_id"][943] = "943"
  print("Added your %d ratings; you have great taste!" % len(my_ratings))
# Display the new user's ratings (assumes this is the cell's last top-level
# expression -- TODO confirm original cell layout).
ratings[ratings.user_id=="943"].merge(movies[['movie_id', 'title']])
###Output
_____no_output_____
###Markdown
III. Training a Matrix Factorization model CFModel (Collaborative Filtering Model) helper classThis is a simple class to train a matrix factorization model using stochastic gradient descent.The class constructor takes- the user embeddings U (a `tf.Variable`).- the movie embeddings V, (a `tf.Variable`).- a loss to optimize (a `tf.Tensor`).- an optional list of metrics dictionaries, each mapping a string (the name of the metric) to a tensor. These are evaluated and plotted during training (e.g. training error and test error).After training, one can access the trained embeddings using the `model.embeddings` dictionary.Example usage:```U_var = ...V_var = ...loss = ...model = CFModel(U_var, V_var, loss)model.train(iterations=100, learning_rate=1.0)user_embeddings = model.embeddings['user_id']movie_embeddings = model.embeddings['movie_id']```
###Code
# @title CFModel helper class (run this cell)
class CFModel(object):
  """Simple class that represents a collaborative filtering model"""

  def __init__(self, embedding_vars, loss, metrics=None):
    """Initializes a CFModel.
    Args:
      embedding_vars: A dictionary of tf.Variables.
      loss: A float Tensor. The loss to optimize.
      metrics: optional list of dictionaries of Tensors. The metrics in each
        dictionary will be plotted in a separate figure during training.
    """
    self._embedding_vars = embedding_vars
    self._loss = loss
    self._metrics = metrics
    # Trained embedding values (numpy), filled in after train().
    self._embeddings = {k: None for k in embedding_vars}
    # TF1 session; created lazily on the first call to train().
    self._session = None

  @property
  def embeddings(self):
    """The embeddings dictionary."""
    return self._embeddings

  def train(self, num_iterations=100, learning_rate=1.0, plot_results=True,
            optimizer=tf.train.GradientDescentOptimizer):
    """Trains the model.
    Args:
      num_iterations: number of iterations to run.
      learning_rate: optimizer learning rate.
      plot_results: whether to plot the results at the end of training.
      optimizer: the optimizer to use. Default to GradientDescentOptimizer.
    Returns:
      The metrics dictionary evaluated at the last iteration.
    """
    with self._loss.graph.as_default():
      opt = optimizer(learning_rate)
      train_op = opt.minimize(self._loss)
      local_init_op = tf.group(
          tf.variables_initializer(opt.variables()),
          tf.local_variables_initializer())
      if self._session is None:
        self._session = tf.Session()
        with self._session.as_default():
          self._session.run(tf.global_variables_initializer())
          self._session.run(tf.tables_initializer())
          tf.train.start_queue_runners()

    with self._session.as_default():
      local_init_op.run()
      iterations = []
      # If no metrics were given, still run train_op against an empty dict.
      metrics = self._metrics or ({},)
      metrics_vals = [collections.defaultdict(list) for _ in self._metrics]

      # Train and append results.
      for i in range(num_iterations + 1):
        _, results = self._session.run((train_op, metrics))
        # Record/print every 10 iterations and at the final iteration.
        if (i % 10 == 0) or i == num_iterations:
          print("\r iteration %d: " % i + ", ".join(
                ["%s=%f" % (k, v) for r in results for k, v in r.items()]),
                end='')
          iterations.append(i)
          for metric_val, result in zip(metrics_vals, results):
            for k, v in result.items():
              metric_val[k].append(v)

      # Materialize the trained embeddings as numpy arrays.
      for k, v in self._embedding_vars.items():
        self._embeddings[k] = v.eval()

      if plot_results:
        # Plot the metrics.
        num_subplots = len(metrics)+1
        fig = plt.figure()
        fig.set_size_inches(num_subplots*10, 8)
        for i, metric_vals in enumerate(metrics_vals):
          ax = fig.add_subplot(1, num_subplots, i+1)
          for k, v in metric_vals.items():
            ax.plot(iterations, v, label=k)
          ax.set_xlim([1, num_iterations])
          ax.legend()
      return results
###Output
_____no_output_____
###Markdown
Exercise 4: Build a Matrix Factorization model and train itUsing your `sparse_mean_square_error` function, write a function that builds a `CFModel` by creating the embedding variables and the train and test losses.
###Code
def build_model(ratings, embedding_dim=3, init_stddev=1.):
  """
  Args:
    ratings: a DataFrame of the ratings
    embedding_dim: the dimension of the embedding vectors.
    init_stddev: float, the standard deviation of the random initial embeddings.
  Returns:
    model: a CFModel.
  """
  # Split the ratings DataFrame into train and test.
  train_ratings, test_ratings = split_dataframe(ratings)
  # SparseTensor representation of the train and test datasets.
  # ========================= Complete this section ============================
  # A_train =
  # A_test =
  # ============================================================================
  # Initialize the embeddings using a normal distribution.
  U = tf.Variable(tf.random_normal(
      [A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
  V = tf.Variable(tf.random_normal(
      [A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
  # ========================= Complete this section ============================
  # train_loss =
  # test_loss =
  # ============================================================================
  metrics = {
      'train_error': train_loss,
      'test_error': test_loss
  }
  embeddings = {
      "user_id": U,
      "movie_id": V
  }
  return CFModel(embeddings, train_loss, [metrics])

#@title Solution
def build_model(ratings, embedding_dim=3, init_stddev=1.):
  """
  Args:
    ratings: a DataFrame of the ratings
    embedding_dim: the dimension of the embedding vectors.
    init_stddev: float, the standard deviation of the random initial embeddings.
  Returns:
    model: a CFModel.
  """
  # Split the ratings DataFrame into train and test.
  train_ratings, test_ratings = split_dataframe(ratings)
  # SparseTensor representation of the train and test datasets.
  A_train = build_rating_sparse_tensor(train_ratings)
  A_test = build_rating_sparse_tensor(test_ratings)
  # Initialize the embeddings using a normal distribution.
  U = tf.Variable(tf.random_normal(
      [A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
  V = tf.Variable(tf.random_normal(
      [A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
  # Train on the observed entries of the train split; report test error too.
  train_loss = sparse_mean_square_error(A_train, U, V)
  test_loss = sparse_mean_square_error(A_test, U, V)
  metrics = {
      'train_error': train_loss,
      'test_error': test_loss
  }
  embeddings = {
      "user_id": U,
      "movie_id": V
  }
  return CFModel(embeddings, train_loss, [metrics])
###Output
_____no_output_____
###Markdown
Great, now it's time to train the model!Go ahead and run the next cell, trying different parameters (embedding dimension, learning rate, iterations). The training and test errors are plotted at the end of training. You can inspect these values to validate the hyper-parameters.Note: by calling `model.train` again, the model will continue training starting from the current values of the embeddings.
###Code
# Build the CF model and train it.
model = build_model(ratings, embedding_dim=30, init_stddev=0.5)
model.train(num_iterations=1000, learning_rate=10.)
###Output
_____no_output_____
###Markdown
The movie and user embeddings are also displayed in the right figure. When the embedding dimension is greater than 3, the embeddings are projected on the first 3 dimensions. The next section will have a more detailed look at the embeddings. IV.
Inspecting the EmbeddingsIn this section, we take a closer look at the learned embeddings, by- computing your recommendations- looking at the nearest neighbors of some movies,- looking at the norms of the movie embeddings,- visualizing the embedding in a projected embedding space. Exercise 5: Write a function that computes the scores of the candidatesWe start by writing a function that, given a query embedding $u \in \mathbb R^d$ and item embeddings $V \in \mathbb R^{N \times d}$, computes the item scores.As discussed in the lecture, there are different similarity measures we can use, and these can yield different results. We will compare the following:- dot product: the score of item j is $\langle u, V_j \rangle$.- cosine: the score of item j is $\frac{\langle u, V_j \rangle}{\|u\|\|V_j\|}$.Hints:- you can use [`np.dot`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html) to compute the product of two np.Arrays.- you can use [`np.linalg.norm`](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.norm.html) to compute the norm of a np.Array.
###Code
DOT = 'dot'
COSINE = 'cosine'
def compute_scores(query_embedding, item_embeddings, measure=DOT):
  """Computes the scores of the candidates given a query.
  Args:
    query_embedding: a vector of shape [k], representing the query embedding.
    item_embeddings: a matrix of shape [N, k], such that row i is the embedding
      of item i.
    measure: a string specifying the similarity measure to be used. Can be
      either DOT or COSINE.
  Returns:
    scores: a vector of shape [N], such that scores[i] is the score of item i.
  """
  # ========================= Complete this section ============================
  # scores =
  # ============================================================================
  return scores

#@title Solution
DOT = 'dot'
COSINE = 'cosine'
def compute_scores(query_embedding, item_embeddings, measure=DOT):
  """Computes the scores of the candidates given a query.
  Args:
    query_embedding: a vector of shape [k], representing the query embedding.
    item_embeddings: a matrix of shape [N, k], such that row i is the embedding
      of item i.
    measure: a string specifying the similarity measure to be used. Can be
      either DOT or COSINE.
  Returns:
    scores: a vector of shape [N], such that scores[i] is the score of item i.
  """
  u = query_embedding
  V = item_embeddings
  if measure == COSINE:
    # Normalize rows of V and the query so the dot product becomes cosine.
    V = V / np.linalg.norm(V, axis=1, keepdims=True)
    u = u / np.linalg.norm(u)
  scores = u.dot(V.T)
  return scores
###Output
_____no_output_____
###Markdown
Equipped with this function, we can compute recommendations, where the query embedding can be either a user embedding or a movie embedding.
###Code
# @title User recommendations and nearest neighbors (run this cell)
def user_recommendations(model, measure=DOT, exclude_rated=False, k=6):
  # Display the top-k movies for synthetic user "943" (index 943).
  if USER_RATINGS:
    scores = compute_scores(
        model.embeddings["user_id"][943], model.embeddings["movie_id"], measure)
    score_key = measure + ' score'
    df = pd.DataFrame({
        score_key: list(scores),
        'movie_id': movies['movie_id'],
        'titles': movies['title'],
        'genres': movies['all_genres'],
    })
    if exclude_rated:
      # remove movies that are already rated
      rated_movies = ratings[ratings.user_id == "943"]["movie_id"].values
      df = df[df.movie_id.apply(lambda movie_id: movie_id not in rated_movies)]
    display.display(df.sort_values([score_key], ascending=False).head(k))

def movie_neighbors(model, title_substring, measure=DOT, k=6):
  # Display the k nearest movies (under `measure`) to the first title match.
  # Search for movie ids that match the given substring.
  ids = movies[movies['title'].str.contains(title_substring)].index.values
  titles = movies.iloc[ids]['title'].values
  if len(titles) == 0:
    raise ValueError("Found no movies with title %s" % title_substring)
  print("Nearest neighbors of : %s." % titles[0])
  if len(titles) > 1:
    print("[Found more than one matching movie. Other candidates: {}]".format(
        ", ".join(titles[1:])))
  movie_id = ids[0]
  scores = compute_scores(
      model.embeddings["movie_id"][movie_id], model.embeddings["movie_id"],
      measure)
  score_key = measure + ' score'
  df = pd.DataFrame({
      score_key: list(scores),
      'titles': movies['title'],
      'genres': movies['all_genres']
  })
  display.display(df.sort_values([score_key], ascending=False).head(k))
###Output
_____no_output_____
###Markdown
Your recommendationsIf you chose to input your recommendations, you can run the next cell to generate recommendations for you.
###Code
user_recommendations(model, measure=COSINE, k=5)
###Output
_____no_output_____
###Markdown
How do the recommendations look? Movie Nearest neighborsLet's look at the nearest neighbors for some of the movies.
###Code
movie_neighbors(model, "Aladdin", DOT)
movie_neighbors(model, "Aladdin", COSINE)
###Output
_____no_output_____
###Markdown
It seems that the quality of learned embeddings may not be very good. This will be addressed in Section V by adding several regularization techniques. First, we will further inspect the embeddings. Movie Embedding NormWe can also observe that the recommendations with dot-product and cosine are different: with dot-product, the model tends to recommend popular movies. This can be explained by the fact that in matrix factorization models, the norm of the embedding is often correlated with popularity (popular movies have a larger norm), which makes it more likely to recommend more popular items. We can confirm this hypothesis by sorting the movies by their embedding norm, as done in the next cell.
###Code
# @title Embedding Visualization code (run this cell)
def movie_embedding_norm(models):
  """Visualizes the norm and number of ratings of the movie embeddings.
  Args:
    models: A MFModel object, or a list of them (one scatter chart per model).
  """
  if not isinstance(models, list):
    models = [models]
  df = pd.DataFrame({
      'title': movies['title'],
      'genre': movies['genre'],
      'num_ratings': movies_ratings['rating count'],
  })
  charts = []
  # Shared interval selection so picking points highlights all charts at once.
  brush = alt.selection_interval()
  for i, model in enumerate(models):
    norm_key = 'norm'+str(i)
    df[norm_key] = np.linalg.norm(model.embeddings["movie_id"], axis=1)
    nearest = alt.selection(
        type='single', encodings=['x', 'y'], on='mouseover', nearest=True,
        empty='none')
    base = alt.Chart().mark_circle().encode(
        x='num_ratings',
        y=norm_key,
        color=alt.condition(brush, alt.value('#4c78a8'), alt.value('lightgray'))
    ).properties(
        selection=nearest).add_selection(brush)
    text = alt.Chart().mark_text(align='center', dx=5, dy=-5).encode(
        x='num_ratings',
        y=norm_key,
        text=alt.condition(nearest, 'title', alt.value('')))
    charts.append(alt.layer(base, text))
  return alt.hconcat(*charts, data=df)

def visualize_movie_embeddings(data, x, y):
  # Scatter plot of movie embeddings colored by genre, with hover labels.
  nearest = alt.selection(
      type='single', encodings=['x', 'y'], on='mouseover', nearest=True,
      empty='none')
  base = alt.Chart().mark_circle().encode(
      x=x,
      y=y,
      color=alt.condition(genre_filter, "genre", alt.value("whitesmoke")),
  ).properties(
      width=600,
      height=600,
      selection=nearest)
  text = alt.Chart().mark_text(align='left', dx=5, dy=-5).encode(
      x=x,
      y=y,
      text=alt.condition(nearest, 'title', alt.value('')))
  return alt.hconcat(alt.layer(base, text), genre_chart, data=data)

def tsne_movie_embeddings(model):
  """Visualizes the movie embeddings, projected using t-SNE with Cosine measure.
  Args:
    model: A MFModel object.
  """
  tsne = sklearn.manifold.TSNE(
      n_components=2, perplexity=40, metric='cosine', early_exaggeration=10.0,
      init='pca', verbose=True, n_iter=400)
  print('Running t-SNE...')
  V_proj = tsne.fit_transform(model.embeddings["movie_id"])
  # Store the 2-D projection on the global movies DataFrame for plotting.
  movies.loc[:,'x'] = V_proj[:, 0]
  movies.loc[:,'y'] = V_proj[:, 1]
  return visualize_movie_embeddings(movies, 'x', 'y')

movie_embedding_norm(model)
###Output
_____no_output_____
###Markdown
Note: Depending on how the model is initialized, you may observe that some niche movies (ones with few ratings) have a high norm, leading to spurious recommendations. This can happen if the embedding of that movie happens to be initialized with a high norm. Then, because the movie has few ratings, it is infrequently updated, and can keep its high norm. This will be alleviated by using regularization.Try changing the value of the hyper-parameter `init_stddev`. One quantity that can be helpful is that the expected norm of a $d$-dimensional vector with entries $\sim \mathcal N(0, \sigma^2)$ is approximately $\sigma \sqrt d$.How does this affect the embedding norm distribution, and the ranking of the top-norm movies?
###Code
#@title Solution
model_lowinit = build_model(ratings, embedding_dim=30, init_stddev=0.05)
model_lowinit.train(num_iterations=1000, learning_rate=10.)
movie_neighbors(model_lowinit, "Aladdin", DOT)
movie_neighbors(model_lowinit, "Aladdin", COSINE)
movie_embedding_norm([model, model_lowinit])
###Output
_____no_output_____
###Markdown
Embedding visualizationSince it is hard to visualize embeddings in a higher-dimensional space (when the embedding dimension $k > 3$), one approach is to project the embeddings to a lower dimensional space. T-SNE (T-distributed Stochastic Neighbor Embedding) is an algorithm that projects the embeddings while attempting to preserve their pairwise distances. It can be useful for visualization, but one should use it with care. For more information on using t-SNE, see [How to Use t-SNE Effectively](https://distill.pub/2016/misread-tsne/).
###Code
tsne_movie_embeddings(model_lowinit)
###Output
_____no_output_____
###Markdown
You can highlight the embeddings of a given genre by clicking on the genres panel (SHIFT+click to select multiple genres).We can observe that the embeddings do not seem to have any notable structure, and the embeddings of a given genre are located all over the embedding space. This confirms the poor quality of the learned embeddings. One of the main reasons, which we will address in the next section, is that we only trained the model on observed pairs, and without regularization. V. Regularization In Matrix FactorizationIn the previous section, our loss was defined as the mean squared error on the observed part of the rating matrix. As discussed in the lecture, this can be problematic as the model does not learn how to place the embeddings of irrelevant movies. This phenomenon is known as *folding*.We will add regularization terms that will address this issue. We will use two types of regularization:- Regularization of the model parameters. This is a common $\ell_2$ regularization term on the embedding matrices, given by $r(U, V) = \frac{1}{N} \sum_i \|U_i\|^2 + \frac{1}{M}\sum_j \|V_j\|^2$.- A global prior that pushes the prediction of any pair towards zero, called the *gravity* term. This is given by $g(U, V) = \frac{1}{MN} \sum_{i = 1}^N \sum_{j = 1}^M \langle U_i, V_j \rangle^2$.The total loss is then given by$$\frac{1}{|\Omega|}\sum_{(i, j) \in \Omega} (A_{ij} - \langle U_i, V_j\rangle)^2 + \lambda _r r(U, V) + \lambda_g g(U, V)$$where $\lambda_r$ and $\lambda_g$ are two regularization coefficients (hyper-parameters). Exercise 6: Build a regularized Matrix Factorization model and train itWrite a function that builds a regularized model. You are given a function `gravity(U, V)` that computes the gravity term given the two embedding matrices $U$ and $V$.
###Code
def gravity(U, V):
  """Creates a gravity loss given two embedding matrices."""
  return 1. / (U.shape[0].value*V.shape[0].value) * tf.reduce_sum(
      tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True))

def build_regularized_model(
    ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1.,
    init_stddev=0.1):
  """
  Args:
    ratings: the DataFrame of movie ratings.
    embedding_dim: The dimension of the embedding space.
    regularization_coeff: The regularization coefficient lambda.
    gravity_coeff: The gravity regularization coefficient lambda_g.
    init_stddev: float, the standard deviation of the random initial embeddings.
  Returns:
    A CFModel object that uses a regularized loss.
  """
  # Split the ratings DataFrame into train and test.
  train_ratings, test_ratings = split_dataframe(ratings)
  # SparseTensor representation of the train and test datasets.
  A_train = build_rating_sparse_tensor(train_ratings)
  A_test = build_rating_sparse_tensor(test_ratings)
  U = tf.Variable(tf.random_normal(
      [A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
  V = tf.Variable(tf.random_normal(
      [A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
  # ========================= Complete this section ============================
  # error_train =
  # error_test =
  # gravity_loss =
  # regularization_loss =
  # ============================================================================
  total_loss = error_train + regularization_loss + gravity_loss
  losses = {
      'train_error': error_train,
      'test_error': error_test,
  }
  loss_components = {
      'observed_loss': error_train,
      'regularization_loss': regularization_loss,
      'gravity_loss': gravity_loss,
  }
  embeddings = {"user_id": U, "movie_id": V}
  return CFModel(embeddings, total_loss, [losses, loss_components])

# @title Solution
def gravity(U, V):
  """Creates a gravity loss given two embedding matrices."""
  return 1. / (U.shape[0].value*V.shape[0].value) * tf.reduce_sum(
      tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True))

def build_regularized_model(
    ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1.,
    init_stddev=0.1):
  """
  Args:
    ratings: the DataFrame of movie ratings.
    embedding_dim: The dimension of the embedding space.
    regularization_coeff: The regularization coefficient lambda.
    gravity_coeff: The gravity regularization coefficient lambda_g.
    init_stddev: float, the standard deviation of the random initial embeddings.
  Returns:
    A CFModel object that uses a regularized loss.
  """
  # Split the ratings DataFrame into train and test.
  train_ratings, test_ratings = split_dataframe(ratings)
  # SparseTensor representation of the train and test datasets.
  A_train = build_rating_sparse_tensor(train_ratings)
  A_test = build_rating_sparse_tensor(test_ratings)
  U = tf.Variable(tf.random_normal(
      [A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
  V = tf.Variable(tf.random_normal(
      [A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
  # Observed-entry MSE on the train/test splits.
  error_train = sparse_mean_square_error(A_train, U, V)
  error_test = sparse_mean_square_error(A_test, U, V)
  gravity_loss = gravity_coeff * gravity(U, V)
  # l2 regularization, averaged per row of each embedding matrix.
  regularization_loss = regularization_coeff * (
      tf.reduce_sum(U*U)/U.shape[0].value + tf.reduce_sum(V*V)/V.shape[0].value)
  total_loss = error_train + regularization_loss + gravity_loss
  losses = {
      'train_error_observed': error_train,
      'test_error_observed': error_test,
  }
  loss_components = {
      'observed_loss': error_train,
      'regularization_loss': regularization_loss,
      'gravity_loss': gravity_loss,
  }
  embeddings = {"user_id": U, "movie_id": V}
  return CFModel(embeddings, total_loss, [losses, loss_components])
###Output
_____no_output_____
###Markdown
It is now time to train the regularized model! You can try different values of the regularization coefficients, and different embedding dimensions.
###Code reg_model = build_regularized_model( ratings, regularization_coeff=0.1, gravity_coeff=1.0, embedding_dim=35, init_stddev=.05) reg_model.train(num_iterations=2000, learning_rate=20.) ###Output _____no_output_____ ###Markdown Observe that adding the regularization terms results in a higher MSE, both on the training and test set. However, as we will see, the quality of the recommendations improves. This highlights a tension between fitting the observed data and minimizing the regularization terms. Fitting the observed data often emphasizes learning high similarity (between items with many interactions), but a good embedding representation also requires learning low similarity (between items with few or no interactions). Inspect the resultsLet's see if the results with regularization look better. ###Code user_recommendations(reg_model, DOT, exclude_rated=True, k=10) ###Output _____no_output_____ ###Markdown Hopefully, these recommendations look better. You can change the similarity measure from COSINE to DOT and observe how this affects the recommendations.Since the model is likely to recommend items that you rated highly, you have the option to exclude the items you rated, using `exclude_rated=True`.In the following cells, we display the nearest neighbors, the embedding norms, and the t-SNE projection of the movie embeddings. ###Code movie_neighbors(reg_model, "Aladdin", DOT) movie_neighbors(reg_model, "Aladdin", COSINE) ###Output _____no_output_____ ###Markdown Here we compare the embedding norms for `model` and `reg_model`. Selecting a subset of the embeddings will highlight them on both charts simultaneously. ###Code movie_embedding_norm([model, model_lowinit, reg_model]) # Visualize the embeddings tsne_movie_embeddings(reg_model) ###Output _____no_output_____ ###Markdown We should observe that the embeddings have a lot more structure than the unregularized case. 
Try selecting different genres and observe how they tend to form clusters (for example Horror, Animation and Children). ConclusionThis concludes this section on matrix factorization models. Note that while the scale of the problem is small enough to allow efficient training using SGD, many practical problems need to be trained using more specialized algorithms such as Alternating Least Squares (see [tf.contrib.factorization.WALSMatrixFactorization](https://www.tensorflow.org/api_docs/python/tf/contrib/factorization/WALSMatrixFactorization) for a TF implementation). VI. Softmax modelIn this section, we will train a simple softmax model that predicts whether a given user has rated a movie.**Note**: if you are taking the self-study version of the class, make sure to read through the part of the class covering Softmax training before working on this part.The model will take as input a feature vector $x$ representing the list of movies the user has rated. We start from the ratings DataFrame, which we group by user_id.
###Code
rated_movies = (ratings[["user_id", "movie_id"]]
                .groupby("user_id", as_index=False)
                .aggregate(lambda x: list(x)))
rated_movies.head()
###Output
_____no_output_____
###Markdown
We then create a function that generates an example batch, such that each example contains the following features:- movie_id: A tensor of strings of the movie ids that the user rated.- genre: A tensor of strings of the genres of those movies- year: A tensor of strings of the release year.
###Code
#@title Batch generation code (run this cell)
# Lookup tables from movie_id to release year and to the list of genres.
years_dict = {
    movie: year for movie, year in zip(movies["movie_id"], movies["year"])
}
genres_dict = {
    movie: genres.split('-')
    for movie, genres in zip(movies["movie_id"], movies["all_genres"])
}

def make_batch(ratings, batch_size):
  """Creates a batch of examples.
  Args:
    ratings: A DataFrame of ratings such that examples["movie_id"] is a list of
      movies rated by a user.
    batch_size: The batch size.
  """
  def pad(x, fill):
    # Pad ragged per-user lists to a rectangular array with `fill`.
    return pd.DataFrame.from_dict(x).fillna(fill).values

  movie = []
  year = []
  genre = []
  label = []
  for movie_ids in ratings["movie_id"].values:
    movie.append(movie_ids)
    genre.append([x for movie_id in movie_ids for x in genres_dict[movie_id]])
    year.append([years_dict[movie_id] for movie_id in movie_ids])
    label.append([int(movie_id) for movie_id in movie_ids])
  features = {
      "movie_id": pad(movie, ""),
      "year": pad(year, ""),
      "genre": pad(genre, ""),
      "label": pad(label, -1)
  }
  batch = (
      tf.data.Dataset.from_tensor_slices(features)
      .shuffle(1000)
      .repeat()
      .batch(batch_size)
      .make_one_shot_iterator()
      .get_next())
  return batch

def select_random(x):
  """Selects a random element from each row of x."""
  def to_float(x):
    return tf.cast(x, tf.float32)
  def to_int(x):
    return tf.cast(x, tf.int64)
  batch_size = tf.shape(x)[0]
  rn = tf.range(batch_size)
  # Rows are padded with -1; count only valid (>= 0) entries.
  nnz = to_float(tf.count_nonzero(x >= 0, axis=1))
  rnd = tf.random_uniform([batch_size])
  # Pick a uniformly random valid column index per row.
  ids = tf.stack([to_int(rn), to_int(nnz * rnd)], axis=1)
  return to_int(tf.gather_nd(x, ids))
###Output
_____no_output_____
###Markdown
Loss functionRecall that the softmax model maps the input features $x$ to a user embedding $\psi(x) \in \mathbb R^d$, where $d$ is the embedding dimension. This vector is then multiplied by a movie embedding matrix $V \in \mathbb R^{m \times d}$ (where $m$ is the number of movies), and the final output of the model is the softmax of the product$$\hat p(x) = \text{softmax}(\psi(x) V^\top).$$Given a target label $y$, if we denote by $p = 1_y$ a one-hot encoding of this target label, then the loss is the cross-entropy between $\hat p(x)$ and $p$.
Exercise 7: Write a loss function for the softmax model.In this exercise, we will write a function that takes tensors representing the user embeddings $\psi(x)$, movie embeddings $V$, target label $y$, and return the cross-entropy loss.Hint: You can use the function [`tf.nn.sparse_softmax_cross_entropy_with_logits`](https://www.tensorflow.org/api_docs/python/tf/nn/sparse_softmax_cross_entropy_with_logits), which takes `logits` as input, where `logits` refers to the product $\psi(x) V^\top$.
###Code
def softmax_loss(user_embeddings, movie_embeddings, labels):
  """Returns the cross-entropy loss of the softmax model.
  Args:
    user_embeddings: A tensor of shape [batch_size, embedding_dim].
    movie_embeddings: A tensor of shape [num_movies, embedding_dim].
    labels: A sparse tensor of dense_shape [batch_size, 1], such that
      labels[i] is the target label for example i.
  Returns:
    The mean cross-entropy loss.
  """
  # ========================= Complete this section ============================
  # logits =
  # loss =
  # ============================================================================
  return loss

# @title Solution
def softmax_loss(user_embeddings, movie_embeddings, labels):
  """Returns the cross-entropy loss of the softmax model.
  Args:
    user_embeddings: A tensor of shape [batch_size, embedding_dim].
    movie_embeddings: A tensor of shape [num_movies, embedding_dim].
    labels: A tensor of [batch_size], such that labels[i] is the target label
      for example i.
  Returns:
    The mean cross-entropy loss.
  """
  # Verify that the embeddings have compatible dimensions
  user_emb_dim = user_embeddings.shape[1].value
  movie_emb_dim = movie_embeddings.shape[1].value
  if user_emb_dim != movie_emb_dim:
    raise ValueError(
        "The user embedding dimension %d should match the movie embedding "
        "dimension % d" % (user_emb_dim, movie_emb_dim))
  logits = tf.matmul(user_embeddings, movie_embeddings, transpose_b=True)
  loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels))
  return loss
###Output
_____no_output_____
###Markdown
Exercise 8: Build a softmax model, train it, and inspect its embeddings.We are now ready to build a softmax CFModel. Complete the `build_softmax_model` function in the next cell. The architecture of the model is defined in the function `create_user_embeddings` and illustrated in the figure below. The input embeddings (movie_id, genre and year) are concatenated to form the input layer, then we have hidden layers with dimensions specified by the `hidden_dims` argument. Finally, the last hidden layer is multiplied by the movie embeddings to obtain the logits layer. For the target label, we will use a randomly-sampled movie_id from the list of movies the user rated.![Softmax model](https://github.com/google/eng-edu/blob/master/ml/recommendation-systems/images/softmax-model.png?raw=true)Complete the function below by creating the feature columns and embedding columns, then creating the loss tensors both for the train and test sets (using the `softmax_loss` function of the previous exercise).
###Code
def build_softmax_model(rated_movies, embedding_cols, hidden_dims):
  """Builds a Softmax model for MovieLens.
  Args:
    rated_movies: DataFrame of training examples.
    embedding_cols: A dictionary mapping feature names (string) to embedding
      column objects. This will be used in tf.feature_column.input_layer() to
      create the input layer.
    hidden_dims: int list of the dimensions of the hidden layers.
  Returns:
    A CFModel object.
  """
  def create_network(features):
    """Maps input features dictionary to user embeddings.
    Args:
      features: A dictionary of input string tensors.
    Returns:
      outputs: A tensor of shape [batch_size, embedding_dim].
    """
    # Create a bag-of-words embedding for each sparse feature.
    inputs = tf.feature_column.input_layer(features, embedding_cols)
    # Hidden layers.
    input_dim = inputs.shape[1].value
    for i, output_dim in enumerate(hidden_dims):
      w = tf.get_variable(
          "hidden%d_w_" % i, shape=[input_dim, output_dim],
          initializer=tf.truncated_normal_initializer(
              stddev=1./np.sqrt(output_dim))) / 10.
      outputs = tf.matmul(inputs, w)
      input_dim = output_dim
      inputs = outputs
    return outputs

  train_rated_movies, test_rated_movies = split_dataframe(rated_movies)
  train_batch = make_batch(train_rated_movies, 200)
  test_batch = make_batch(test_rated_movies, 100)

  with tf.variable_scope("model", reuse=False):
    # Train
    train_user_embeddings = create_network(train_batch)
    train_labels = select_random(train_batch["label"])
  with tf.variable_scope("model", reuse=True):
    # Test
    test_user_embeddings = create_network(test_batch)
    test_labels = select_random(test_batch["label"])
    movie_embeddings = tf.get_variable(
        "input_layer/movie_id_embedding/embedding_weights")

  # ========================= Complete this section ============================
  # train_loss =
  # test_loss =
  # test_precision_at_10 =
  # ============================================================================

  metrics = (
      {"train_loss": train_loss, "test_loss": test_loss},
      {"test_precision_at_10": test_precision_at_10}
  )
  embeddings = {"movie_id": movie_embeddings}
  return CFModel(embeddings, train_loss, metrics)

# @title Solution
def build_softmax_model(rated_movies, embedding_cols, hidden_dims):
  """Builds a Softmax model for MovieLens.
  Args:
    rated_movies: DataFrame of training examples.
    embedding_cols: A dictionary mapping feature names (string) to embedding
      column objects. This will be used in tf.feature_column.input_layer() to
      create the input layer.
    hidden_dims: int list of the dimensions of the hidden layers.
  Returns:
    A CFModel object.
  """
  def create_network(features):
    """Maps input features dictionary to user embeddings.
    Args:
      features: A dictionary of input string tensors.
    Returns:
      outputs: A tensor of shape [batch_size, embedding_dim].
    """
    # Create a bag-of-words embedding for each sparse feature.
    inputs = tf.feature_column.input_layer(features, embedding_cols)
    # Hidden layers.
    input_dim = inputs.shape[1].value
    for i, output_dim in enumerate(hidden_dims):
      w = tf.get_variable(
          "hidden%d_w_" % i, shape=[input_dim, output_dim],
          initializer=tf.truncated_normal_initializer(
              stddev=1./np.sqrt(output_dim))) / 10.
      outputs = tf.matmul(inputs, w)
      input_dim = output_dim
      inputs = outputs
    return outputs

  train_rated_movies, test_rated_movies = split_dataframe(rated_movies)
  train_batch = make_batch(train_rated_movies, 200)
  test_batch = make_batch(test_rated_movies, 100)

  # Build the network twice over the same variables: once for train, once
  # (with reuse=True) for test, so both share the learned parameters.
  with tf.variable_scope("model", reuse=False):
    # Train
    train_user_embeddings = create_network(train_batch)
    train_labels = select_random(train_batch["label"])
  with tf.variable_scope("model", reuse=True):
    # Test
    test_user_embeddings = create_network(test_batch)
    test_labels = select_random(test_batch["label"])
    movie_embeddings = tf.get_variable(
        "input_layer/movie_id_embedding/embedding_weights")

  test_loss = softmax_loss(
      test_user_embeddings, movie_embeddings, test_labels)
  train_loss = softmax_loss(
      train_user_embeddings, movie_embeddings, train_labels)
  _, test_precision_at_10 = tf.metrics.precision_at_k(
      labels=test_labels,
      predictions=tf.matmul(test_user_embeddings, movie_embeddings,
                            transpose_b=True),
      k=10)

  metrics = (
      {"train_loss": train_loss, "test_loss": test_loss},
      {"test_precision_at_10": test_precision_at_10}
  )
  embeddings = {"movie_id": movie_embeddings}
  return CFModel(embeddings, train_loss, metrics)
###Output
_____no_output_____
###Markdown
Train the Softmax modelWe are now ready to train the softmax model.
You can set the following hyperparameters:- learning rate- number of iterations. Note: you can run `softmax_model.train()` again to continue training the model from its current state.- input embedding dimensions (the `input_dims` argument)- number of hidden layers and size of each layer (the `hidden_dims` argument)Note: since our input features are string-valued (movie_id, genre, and year), we need to map them to integer ids. This is done using [`tf.feature_column.categorical_column_with_vocabulary_list`](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_list), which takes a vocabulary list specifying all the values the feature can take. Then each id is mapped to an embedding vector using [`tf.feature_column.embedding_column`](https://www.tensorflow.org/api_docs/python/tf/feature_column/embedding_column).
###Code
# Create feature embedding columns
def make_embedding_col(key, embedding_dim):
  # Map the string feature `key` to integer ids via its full vocabulary, then
  # to a mean-pooled (bag-of-words) embedding of size `embedding_dim`.
  categorical_col = tf.feature_column.categorical_column_with_vocabulary_list(
      key=key, vocabulary_list=list(set(movies[key].values)), num_oov_buckets=0)
  return tf.feature_column.embedding_column(
      categorical_column=categorical_col, dimension=embedding_dim,
      # default initializer: truncated normal with stddev=1/sqrt(dimension)
      combiner='mean')

with tf.Graph().as_default():
  softmax_model = build_softmax_model(
      rated_movies,
      embedding_cols=[
          make_embedding_col("movie_id", 35),
          make_embedding_col("genre", 3),
          make_embedding_col("year", 2),
      ],
      hidden_dims=[35])

softmax_model.train(
    learning_rate=8., num_iterations=3000, optimizer=tf.train.AdagradOptimizer)
###Output
_____no_output_____
###Markdown
Inspect the embeddingsWe can inspect the movie embeddings as we did for the previous models. Note that in this case, the movie embeddings are used at the same time as input embeddings (for the bag of words representation of the user history), and as softmax weights.
###Code movie_neighbors(softmax_model, "Aladdin", DOT) movie_neighbors(softmax_model, "Aladdin", COSINE) movie_embedding_norm([reg_model, softmax_model]) tsne_movie_embeddings(softmax_model) ###Output _____no_output_____ ###Markdown Here we compare the embedding norms for `model` and `reg_model`. Selecting a subset of the embeddings will highlight them on both charts simultaneously. ###Code movie_embedding_norm([model, model_lowinit, reg_model]) # Visualize the embeddings tsne_movie_embeddings(reg_model) ###Output _____no_output_____ ###Markdown We should observe that the embeddings have a lot more structure than the unregularized case. Try selecting different genres and observe how they tend to form clusters (for example Horror, Animation and Children). ConclusionThis concludes this section on matrix factorization models. Note that while the scale of the problem is small enough to allow efficient training using SGD, many practical problems need to be trained using more specialized algorithms such as Alternating Least Squares (see [tf.contrib.factorization.WALSMatrixFactorization](https://www.tensorflow.org/api_docs/python/tf/contrib/factorization/WALSMatrixFactorization) for a TF implementation). VI. Softmax modelIn this section, we will train a simple softmax model that predicts whether a given user has rated a movie.**Note**: if you are taking the self-study version of the class, make sure to read through the part of the class covering Softmax training before working on this part.The model will take as input a feature vector $x$ representing the list of movies the user has rated. We start from the ratings DataFrame, which we group by user_id. 
###Code rated_movies = (ratings[["user_id", "movie_id"]] .groupby("user_id", as_index=False) .aggregate(lambda x: list(x))) rated_movies.head() ###Output _____no_output_____ ###Markdown We then create a function that generates an example batch, such that each example contains the following features:- movie_id: A tensor of strings of the movie ids that the user rated.- genre: A tensor of strings of the genres of those movies- year: A tensor of strings of the release year. ###Code #@title Batch generation code (run this cell) years_dict = { movie: year for movie, year in zip(movies["movie_id"], movies["year"]) } genres_dict = { movie: genres.split('-') for movie, genres in zip(movies["movie_id"], movies["all_genres"]) } def make_batch(ratings, batch_size): """Creates a batch of examples. Args: ratings: A DataFrame of ratings such that examples["movie_id"] is a list of movies rated by a user. batch_size: The batch size. """ def pad(x, fill): return pd.DataFrame.from_dict(x).fillna(fill).values movie = [] year = [] genre = [] label = [] for movie_ids in ratings["movie_id"].values: movie.append(movie_ids) genre.append([x for movie_id in movie_ids for x in genres_dict[movie_id]]) year.append([years_dict[movie_id] for movie_id in movie_ids]) label.append([int(movie_id) for movie_id in movie_ids]) features = { "movie_id": pad(movie, ""), "year": pad(year, ""), "genre": pad(genre, ""), "label": pad(label, -1) } batch = ( tf.data.Dataset.from_tensor_slices(features) .shuffle(1000) .repeat() .batch(batch_size) .make_one_shot_iterator() .get_next()) return batch def select_random(x): """Selectes a random elements from each row of x.""" def to_float(x): return tf.cast(x, tf.float32) def to_int(x): return tf.cast(x, tf.int64) batch_size = tf.shape(x)[0] rn = tf.range(batch_size) nnz = to_float(tf.count_nonzero(x >= 0, axis=1)) rnd = tf.random_uniform([batch_size]) ids = tf.stack([to_int(rn), to_int(nnz * rnd)], axis=1) return to_int(tf.gather_nd(x, ids)) ###Output 
_____no_output_____ ###Markdown Loss functionRecall that the softmax model maps the input features $x$ to a user embedding $\psi(x) \in \mathbb R^d$, where $d$ is the embedding dimension. This vector is then multiplied by a movie embedding matrix $V \in \mathbb R^{m \times d}$ (where $m$ is the number of movies), and the final output of the model is the softmax of the product$$\hat p(x) = \text{softmax}(\psi(x) V^\top).$$Given a target label $y$, if we denote by $p = 1_y$ a one-hot encoding of this target label, then the loss is the cross-entropy between $\hat p(x)$ and $p$. Exercise 7: Write a loss function for the softmax model.In this exercise, we will write a function that takes tensors representing the user embeddings $\psi(x)$, movie embeddings $V$, target label $y$, and return the cross-entropy loss.Hint: You can use the function [`tf.nn.sparse_softmax_cross_entropy_with_logits`](https://www.tensorflow.org/api_docs/python/tf/nn/sparse_softmax_cross_entropy_with_logits), which takes `logits` as input, where `logits` refers to the product $\psi(x) V^\top$. ###Code def softmax_loss(user_embeddings, movie_embeddings, labels): """Returns the cross-entropy loss of the softmax model. Args: user_embeddings: A tensor of shape [batch_size, embedding_dim]. movie_embeddings: A tensor of shape [num_movies, embedding_dim]. labels: A sparse tensor of dense_shape [batch_size, 1], such that labels[i] is the target label for example i. Returns: The mean cross-entropy loss. """ # ========================= Complete this section ============================ # logits = # loss = # ============================================================================ return loss # @title Solution def softmax_loss(user_embeddings, movie_embeddings, labels): """Returns the cross-entropy loss of the softmax model. Args: user_embeddings: A tensor of shape [batch_size, embedding_dim]. movie_embeddings: A tensor of shape [num_movies, embedding_dim]. 
labels: A tensor of [batch_size], such that labels[i] is the target label for example i. Returns: The mean cross-entropy loss. """ # Verify that the embddings have compatible dimensions user_emb_dim = user_embeddings.shape[1].value movie_emb_dim = movie_embeddings.shape[1].value if user_emb_dim != movie_emb_dim: raise ValueError( "The user embedding dimension %d should match the movie embedding " "dimension % d" % (user_emb_dim, movie_emb_dim)) logits = tf.matmul(user_embeddings, movie_embeddings, transpose_b=True) loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=labels)) return loss ###Output _____no_output_____ ###Markdown Exercise 8: Build a softmax model, train it, and inspect its embeddings.We are now ready to build a softmax CFModel. Complete the `build_softmax_model` function in the next cell. The architecture of the model is defined in the function `create_user_embeddings` and illustrated in the figure below. The input embeddings (movie_id, genre and year) are concatenated to form the input layer, then we have hidden layers with dimensions specified by the `hidden_dims` argument. Finally, the last hidden layer is multiplied by the movie embeddings to obtain the logits layer. For the target label, we will use a randomly-sampled movie_id from the list of movies the user rated.![Softmax model](https://github.com/google/eng-edu/blob/master/ml/recommendation-systems/images/softmax-model.png?raw=true)Complete the function below by creating the feature columns and embedding columns, then creating the loss tensors both for the train and test sets (using the `softmax_loss` function of the previous exercise). ###Code def build_softmax_model(rated_movies, embedding_cols, hidden_dims): """Builds a Softmax model for MovieLens. Args: rated_movies: DataFrame of traing examples. embedding_cols: A dictionary mapping feature names (string) to embedding column objects. 
This will be used in tf.feature_column.input_layer() to create the input layer. hidden_dims: int list of the dimensions of the hidden layers. Returns: A CFModel object. """ def create_network(features): """Maps input features dictionary to user embeddings. Args: features: A dictionary of input string tensors. Returns: outputs: A tensor of shape [batch_size, embedding_dim]. """ # Create a bag-of-words embedding for each sparse feature. inputs = tf.feature_column.input_layer(features, embedding_cols) # Hidden layers. input_dim = inputs.shape[1].value for i, output_dim in enumerate(hidden_dims): w = tf.get_variable( "hidden%d_w_" % i, shape=[input_dim, output_dim], initializer=tf.truncated_normal_initializer( stddev=1./np.sqrt(output_dim))) / 10. outputs = tf.matmul(inputs, w) input_dim = output_dim inputs = outputs return outputs train_rated_movies, test_rated_movies = split_dataframe(rated_movies) train_batch = make_batch(train_rated_movies, 200) test_batch = make_batch(test_rated_movies, 100) with tf.variable_scope("model", reuse=False): # Train train_user_embeddings = create_network(train_batch) train_labels = select_random(train_batch["label"]) with tf.variable_scope("model", reuse=True): # Test test_user_embeddings = create_network(test_batch) test_labels = select_random(test_batch["label"]) movie_embeddings = tf.get_variable( "input_layer/movie_id_embedding/embedding_weights") # ========================= Complete this section ============================ # train_loss = # test_loss = # test_precision_at_10 = # ============================================================================ metrics = ( {"train_loss": train_loss, "test_loss": test_loss}, {"test_precision_at_10": test_precision_at_10} ) embeddings = {"movie_id": movie_embeddings} return CFModel(embeddings, train_loss, metrics) # @title Solution def build_softmax_model(rated_movies, embedding_cols, hidden_dims): """Builds a Softmax model for MovieLens. Args: rated_movies: DataFrame of traing examples. 
embedding_cols: A dictionary mapping feature names (string) to embedding column objects. This will be used in tf.feature_column.input_layer() to create the input layer. hidden_dims: int list of the dimensions of the hidden layers. Returns: A CFModel object. """ def create_network(features): """Maps input features dictionary to user embeddings. Args: features: A dictionary of input string tensors. Returns: outputs: A tensor of shape [batch_size, embedding_dim]. """ # Create a bag-of-words embedding for each sparse feature. inputs = tf.feature_column.input_layer(features, embedding_cols) # Hidden layers. input_dim = inputs.shape[1].value for i, output_dim in enumerate(hidden_dims): w = tf.get_variable( "hidden%d_w_" % i, shape=[input_dim, output_dim], initializer=tf.truncated_normal_initializer( stddev=1./np.sqrt(output_dim))) / 10. outputs = tf.matmul(inputs, w) input_dim = output_dim inputs = outputs return outputs train_rated_movies, test_rated_movies = split_dataframe(rated_movies) train_batch = make_batch(train_rated_movies, 200) test_batch = make_batch(test_rated_movies, 100) with tf.variable_scope("model", reuse=False): # Train train_user_embeddings = create_network(train_batch) train_labels = select_random(train_batch["label"]) with tf.variable_scope("model", reuse=True): # Test test_user_embeddings = create_network(test_batch) test_labels = select_random(test_batch["label"]) movie_embeddings = tf.get_variable( "input_layer/movie_id_embedding/embedding_weights") test_loss = softmax_loss( test_user_embeddings, movie_embeddings, test_labels) train_loss = softmax_loss( train_user_embeddings, movie_embeddings, train_labels) _, test_precision_at_10 = tf.metrics.precision_at_k( labels=test_labels, predictions=tf.matmul(test_user_embeddings, movie_embeddings, transpose_b=True), k=10) metrics = ( {"train_loss": train_loss, "test_loss": test_loss}, {"test_precision_at_10": test_precision_at_10} ) embeddings = {"movie_id": movie_embeddings} return CFModel(embeddings, 
train_loss, metrics) ###Output _____no_output_____ ###Markdown Train the Softmax modelWe are now ready to train the softmax model. You can set the following hyperparameters:- learning rate- number of iterations. Note: you can run `softmax_model.train()` again to continue training the model from its current state.- input embedding dimensions (the `input_dims` argument)- number of hidden layers and size of each layer (the `hidden_dims` argument)Note: since our input features are string-valued (movie_id, genre, and year), we need to map them to integer ids. This is done using [`tf.feature_column.categorical_column_with_vocabulary_list`](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_list), which takes a vocabulary list specifying all the values the feature can take. Then each id is mapped to an embedding vector using [`tf.feature_column.embedding_column`](https://www.tensorflow.org/api_docs/python/tf/feature_column/embedding_column). ###Code # Create feature embedding columns def make_embedding_col(key, embedding_dim): categorical_col = tf.feature_column.categorical_column_with_vocabulary_list( key=key, vocabulary_list=list(set(movies[key].values)), num_oov_buckets=0) return tf.feature_column.embedding_column( categorical_column=categorical_col, dimension=embedding_dim, # default initializer: trancated normal with stddev=1/sqrt(dimension) combiner='mean') with tf.Graph().as_default(): softmax_model = build_softmax_model( rated_movies, embedding_cols=[ make_embedding_col("movie_id", 35), make_embedding_col("genre", 3), make_embedding_col("year", 2), ], hidden_dims=[35]) softmax_model.train( learning_rate=8., num_iterations=3000, optimizer=tf.train.AdagradOptimizer) ###Output _____no_output_____ ###Markdown Inspect the embeddingsWe can inspect the movie embeddings as we did for the previous models. 
Note that in this case, the movie embeddings are used at the same time as input embeddings (for the bag of words representation of the user history), and as softmax weights. ###Code movie_neighbors(softmax_model, "Aladdin", DOT) movie_neighbors(softmax_model, "Aladdin", COSINE) movie_embedding_norm([reg_model, softmax_model]) tsne_movie_embeddings(softmax_model) ###Output _____no_output_____ ###Markdown Copyright 2018 Google LLC. ###Code # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown Recommendation Systems with TensorFlowThis Colab notebook complements the course on [Recommendation Systems](https://developers.google.com/machine-learning/recommendation/). Specifically, we'll be using matrix factorization to learn user and movie embeddings. IntroductionWe will create a movie recommendation system based on the [MovieLens](https://movielens.org/) dataset available [here](http://grouplens.org/datasets/movielens/). The data consists of movies ratings (on a scale of 1 to 5). Outline 1. Exploring the MovieLens Data (10 minutes) 1. Preliminaries (25 minutes) 1. Training a matrix factorization model (15 minutes) 1. Inspecting the Embeddings (15 minutes) 1. Regularization in matrix factorization (15 minutes) 1. Softmax model training (30 minutes) SetupLet's get started by importing the required packages. 
###Code # @title Imports (run this cell) from __future__ import print_function import numpy as np import pandas as pd import collections from mpl_toolkits.mplot3d import Axes3D from IPython import display from matplotlib import pyplot as plt import sklearn import sklearn.manifold import tensorflow.compat.v1 as tf tf.disable_v2_behavior() tf.logging.set_verbosity(tf.logging.ERROR) # Add some convenience functions to Pandas DataFrame. pd.options.display.max_rows = 10 pd.options.display.float_format = '{:.3f}'.format def mask(df, key, function): """Returns a filtered dataframe, by applying function to key""" return df[function(df[key])] def flatten_cols(df): df.columns = [' '.join(col).strip() for col in df.columns.values] return df pd.DataFrame.mask = mask pd.DataFrame.flatten_cols = flatten_cols # Install Altair and activate its colab renderer. print("Installing Altair...") !pip install git+git://github.com/altair-viz/altair.git import altair as alt alt.data_transformers.enable('default', max_rows=None) alt.renderers.enable('colab') print("Done installing Altair.") # Install spreadsheets and import authentication module. USER_RATINGS = False !pip install --upgrade -q gspread from google.colab import auth import gspread from oauth2client.client import GoogleCredentials ###Output _____no_output_____ ###Markdown We then download the MovieLens Data, and create DataFrames containing movies, users, and ratings. ###Code # @title Load the MovieLens data (run this cell). # Download MovieLens data. print("Downloading movielens data...") from urllib.request import urlretrieve import zipfile urlretrieve("http://files.grouplens.org/datasets/movielens/ml-100k.zip", "movielens.zip") zip_ref = zipfile.ZipFile('movielens.zip', "r") zip_ref.extractall() print("Done. Dataset contains:") print(zip_ref.read('ml-100k/u.info')) # Load each data set (users, movies, and ratings). 
users_cols = ['user_id', 'age', 'sex', 'occupation', 'zip_code'] users = pd.read_csv( 'ml-100k/u.user', sep='|', names=users_cols, encoding='latin-1') ratings_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp'] ratings = pd.read_csv( 'ml-100k/u.data', sep='\t', names=ratings_cols, encoding='latin-1') # The movies file contains a binary feature for each genre. genre_cols = [ "genre_unknown", "Action", "Adventure", "Animation", "Children", "Comedy", "Crime", "Documentary", "Drama", "Fantasy", "Film-Noir", "Horror", "Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western" ] movies_cols = [ 'movie_id', 'title', 'release_date', "video_release_date", "imdb_url" ] + genre_cols movies = pd.read_csv( 'ml-100k/u.item', sep='|', names=movies_cols, encoding='latin-1') # Since the ids start at 1, we shift them to start at 0. users["user_id"] = users["user_id"].apply(lambda x: str(x-1)) movies["movie_id"] = movies["movie_id"].apply(lambda x: str(x-1)) movies["year"] = movies['release_date'].apply(lambda x: str(x).split('-')[-1]) ratings["movie_id"] = ratings["movie_id"].apply(lambda x: str(x-1)) ratings["user_id"] = ratings["user_id"].apply(lambda x: str(x-1)) ratings["rating"] = ratings["rating"].apply(lambda x: float(x)) # Compute the number of movies to which a genre is assigned. genre_occurences = movies[genre_cols].sum().to_dict() # Since some movies can belong to more than one genre, we create different # 'genre' columns as follows: # - all_genres: all the active genres of the movie. # - genre: randomly sampled from the active genres. 
def mark_genres(movies, genres):
  """Adds 'genre' and 'all_genres' columns to `movies` (mutates in place).

  'all_genres' joins all active genre flags with '-'. 'genre' is a single
  genre sampled uniformly at random among the active ones (so it is
  non-deterministic across runs). Movies with no active genre get 'Other'.

  Args:
    movies: The movies DataFrame, with one binary column per genre.
    genres: List of genre column names.
  """
  def get_random_genre(gs):
    active = [genre for genre, g in zip(genres, gs) if g==1]
    if len(active) == 0:
      return 'Other'
    return np.random.choice(active)
  def get_all_genres(gs):
    active = [genre for genre, g in zip(genres, gs) if g==1]
    if len(active) == 0:
      return 'Other'
    return '-'.join(active)
  movies['genre'] = [
      get_random_genre(gs) for gs in zip(*[movies[genre] for genre in genres])]
  movies['all_genres'] = [
      get_all_genres(gs) for gs in zip(*[movies[genre] for genre in genres])]

mark_genres(movies, genre_cols)

# Create one merged DataFrame containing all the movielens data.
movielens = ratings.merge(movies, on='movie_id').merge(users, on='user_id')

# Utility to split the data into training and test sets.
def split_dataframe(df, holdout_fraction=0.1):
  """Splits a DataFrame into training and test sets.

  Args:
    df: a dataframe.
    holdout_fraction: fraction of dataframe rows to use in the test set.

  Returns:
    train: dataframe for training
    test: dataframe for testing
  """
  # Sample the holdout without replacement; train is everything else.
  test = df.sample(frac=holdout_fraction, replace=False)
  train = df[~df.index.isin(test.index)]
  return train, test

###Output
 _____no_output_____
###Markdown
 I. Exploring the Movielens DataBefore we dive into model building, let's inspect our MovieLens dataset. It is usually helpful to understand the statistics of the dataset. UsersWe start by printing some basic statistics describing the numeric user features.

###Code

users.describe()

###Output
 _____no_output_____
###Markdown
 We can also print some basic statistics describing the categorical user features

###Code

users.describe(include=[np.object])

###Output
 _____no_output_____
###Markdown
 We can also create histograms to further understand the distribution of the users. We use Altair to create an interactive chart.

###Code

# @title Altair visualization code (run this cell)
# The following functions are used to generate interactive Altair charts.
# We will display histograms of the data, sliced by a given attribute. 
# Create filters to be used to slice the data. occupation_filter = alt.selection_multi(fields=["occupation"]) occupation_chart = alt.Chart().mark_bar().encode( x="count()", y=alt.Y("occupation:N"), color=alt.condition( occupation_filter, alt.Color("occupation:N", scale=alt.Scale(scheme='category20')), alt.value("lightgray")), ).properties(width=300, height=300, selection=occupation_filter) # A function that generates a histogram of filtered data. def filtered_hist(field, label, filter): """Creates a layered chart of histograms. The first layer (light gray) contains the histogram of the full data, and the second contains the histogram of the filtered data. Args: field: the field for which to generate the histogram. label: String label of the histogram. filter: an alt.Selection object to be used to filter the data. """ base = alt.Chart().mark_bar().encode( x=alt.X(field, bin=alt.Bin(maxbins=10), title=label), y="count()", ).properties( width=300, ) return alt.layer( base.transform_filter(filter), base.encode(color=alt.value('lightgray'), opacity=alt.value(.7)), ).resolve_scale(y='independent') ###Output _____no_output_____ ###Markdown Next, we look at the distribution of ratings per user. Clicking on an occupation in the right chart will filter the data by that occupation. The corresponding histogram is shown in blue, and superimposed with the histogram for the whole data (in light gray). You can use SHIFT+click to select multiple subsets.What do you observe, and how might this affect the recommendations? ###Code users_ratings = ( ratings .groupby('user_id', as_index=False) .agg({'rating': ['count', 'mean']}) .flatten_cols() .merge(users, on='user_id') ) # Create a chart for the count, and one for the mean. 
alt.hconcat( filtered_hist('rating count', '# ratings / user', occupation_filter), filtered_hist('rating mean', 'mean user rating', occupation_filter), occupation_chart, data=users_ratings) ###Output _____no_output_____ ###Markdown MoviesIt is also useful to look at information about the movies and their ratings. ###Code movies_ratings = movies.merge( ratings .groupby('movie_id', as_index=False) .agg({'rating': ['count', 'mean']}) .flatten_cols(), on='movie_id') genre_filter = alt.selection_multi(fields=['genre']) genre_chart = alt.Chart().mark_bar().encode( x="count()", y=alt.Y('genre'), color=alt.condition( genre_filter, alt.Color("genre:N"), alt.value('lightgray')) ).properties(height=300, selection=genre_filter) (movies_ratings[['title', 'rating count', 'rating mean']] .sort_values('rating count', ascending=False) .head(10)) (movies_ratings[['title', 'rating count', 'rating mean']] .mask('rating count', lambda x: x > 20) .sort_values('rating mean', ascending=False) .head(10)) ###Output _____no_output_____ ###Markdown Finally, the last chart shows the distribution of the number of ratings and average rating. ###Code # Display the number of ratings and average rating per movie. alt.hconcat( filtered_hist('rating count', '# ratings / movie', genre_filter), filtered_hist('rating mean', 'mean movie rating', genre_filter), genre_chart, data=movies_ratings) ###Output _____no_output_____ ###Markdown II. 
PreliminariesOur goal is to factorize the ratings matrix $A$ into the product of a user embedding matrix $U$ and movie embedding matrix $V$, such that $A \approx UV^\top$ with$U = \begin{bmatrix} u_{1} \\ \hline \vdots \\ \hline u_{N} \end{bmatrix}$ and$V = \begin{bmatrix} v_{1} \\ \hline \vdots \\ \hline v_{M} \end{bmatrix}$.Here- $N$ is the number of users,- $M$ is the number of movies,- $A_{ij}$ is the rating of the $j$th movie by the $i$th user,- each row $U_i$ is a $d$-dimensional vector (embedding) representing user $i$,- each row $V_j$ is a $d$-dimensional vector (embedding) representing movie $j$,- the prediction of the model for the $(i, j)$ pair is the dot product $\langle U_i, V_j \rangle$. Sparse Representation of the Rating MatrixThe rating matrix could be very large and, in general, most of the entries are unobserved, since a given user will only rate a small subset of movies. For efficient representation, we will use a [tf.SparseTensor](https://www.tensorflow.org/api_docs/python/tf/SparseTensor). A `SparseTensor` uses three tensors to represent the matrix: `tf.SparseTensor(indices, values, dense_shape)` represents a tensor, where a value $A_{ij} = a$ is encoded by setting `indices[k] = [i, j]` and `values[k] = a`. The last tensor `dense_shape` is used to specify the shape of the full underlying matrix. Toy exampleAssume we have $2$ users and $4$ movies. 
Our toy ratings dataframe has three ratings,user\_id | movie\_id | rating--:|--:|--:0 | 0 | 5.00 | 1 | 3.01 | 3 | 1.0The corresponding rating matrix is$$A =\begin{bmatrix}5.0 & 3.0 & 0 & 0 \\0 & 0 & 0 & 1.0\end{bmatrix}$$And the SparseTensor representation is,```pythonSparseTensor( indices=[[0, 0], [0, 1], [1,3]], values=[5.0, 3.0, 1.0], dense_shape=[2, 4])``` Exercise 1: Build a tf.SparseTensor representation of the Rating Matrix.In this exercise, we'll write a function that maps from our `ratings` DataFrame to a `tf.SparseTensor`.Hint: you can select the values of a given column of a Dataframe `df` using `df['column_name'].values`. ###Code def build_rating_sparse_tensor(ratings_df): """ Args: ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns. Returns: A tf.SparseTensor representing the ratings matrix. """ # ========================= Complete this section ============================ # indices = # values = # ============================================================================ return tf.SparseTensor( indices=indices, values=values, dense_shape=[users.shape[0], movies.shape[0]]) #@title Solution def build_rating_sparse_tensor(ratings_df): """ Args: ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns. Returns: a tf.SparseTensor representing the ratings matrix. """ indices = ratings_df[['user_id', 'movie_id']].values values = ratings_df['rating'].values return tf.SparseTensor( indices=indices, values=values, dense_shape=[users.shape[0], movies.shape[0]]) ###Output _____no_output_____ ###Markdown Calculating the errorThe model approximates the ratings matrix $A$ by a low-rank product $UV^\top$. We need a way to measure the approximation error. We'll start by using the Mean Squared Error of observed entries only (we will revisit this later). 
It is defined as$$\begin{align*}\text{MSE}(A, UV^\top)&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - (UV^\top)_{ij})^2} \\&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - \langle U_i, V_j\rangle)^2}\end{align*}$$where $\Omega$ is the set of observed ratings, and $|\Omega|$ is the cardinality of $\Omega$. Exercise 2: Mean Squared ErrorWrite a TensorFlow function that takes a sparse rating matrix $A$ and the two embedding matrices $U, V$ and returns the mean squared error $\text{MSE}(A, UV^\top)$.Hints: * in this section, we only consider observed entries when calculating the loss. * a `SparseTensor` `sp_x` is a tuple of three Tensors: `sp_x.indices`, `sp_x.values` and `sp_x.dense_shape`. * you may find [`tf.gather_nd`](https://www.tensorflow.org/api_docs/python/tf/gather_nd) and [`tf.losses.mean_squared_error`](https://www.tensorflow.org/api_docs/python/tf/losses/mean_squared_error) helpful. ###Code def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings): """ Args: sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M] user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding dimension, such that U_i is the embedding of user i. movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding dimension, such that V_j is the embedding of movie j. Returns: A scalar Tensor representing the MSE between the true ratings and the model's predictions. """ # ========================= Complete this section ============================ # loss = # ============================================================================ return loss #@title Solution def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings): """ Args: sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M] user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding dimension, such that U_i is the embedding of user i. 
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding dimension, such that V_j is the embedding of movie j. Returns: A scalar Tensor representing the MSE between the true ratings and the model's predictions. """ predictions = tf.gather_nd( tf.matmul(user_embeddings, movie_embeddings, transpose_b=True), sparse_ratings.indices) loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions) return loss ###Output _____no_output_____ ###Markdown Note: One approach is to compute the full prediction matrix $UV^\top$, then gather the entries corresponding to the observed pairs. The memory cost of this approach is $O(NM)$. For the MovieLens dataset, this is fine, as the dense $N \times M$ matrix is small enough to fit in memory ($N = 943$, $M = 1682$).Another approach (given in the alternate solution below) is to only gather the embeddings of the observed pairs, then compute their dot products. The memory cost is $O(|\Omega| d)$ where $d$ is the embedding dimension. In our case, $|\Omega| = 10^5$, and the embedding dimension is on the order of $10$, so the memory cost of both methods is comparable. But when the number of users or movies is much larger, the first approach becomes infeasible. ###Code #@title Alternate Solution def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings): """ Args: sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M] user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding dimension, such that U_i is the embedding of user i. movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding dimension, such that V_j is the embedding of movie j. Returns: A scalar Tensor representing the MSE between the true ratings and the model's predictions. 
""" predictions = tf.reduce_sum( tf.gather(user_embeddings, sparse_ratings.indices[:, 0]) * tf.gather(movie_embeddings, sparse_ratings.indices[:, 1]), axis=1) loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions) return loss ###Output _____no_output_____ ###Markdown Exercise 3 (Optional): adding your own ratings to the data set You have the option to add your own ratings to the data set. If you choose to do so, you will be able to see recommendations for yourself.Start by checking the box below. Running the next cell will authenticate you to your google Drive account, and create a spreadsheet, that contains all movie titles in column 'A'. Follow the link to the spreadsheet and take 3 minutes to rate some of the movies. Your ratings should be entered in column 'B'. ###Code USER_RATINGS = True #@param {type:"boolean"} # @title Run to create a spreadsheet, then use it to enter your ratings. # Authenticate user. if USER_RATINGS: auth.authenticate_user() gc = gspread.authorize(GoogleCredentials.get_application_default()) # Create the spreadsheet and print a link to it. try: sh = gc.open('MovieLens-test') except(gspread.SpreadsheetNotFound): sh = gc.create('MovieLens-test') worksheet = sh.sheet1 titles = movies['title'].values cell_list = worksheet.range(1, 1, len(titles), 1) for cell, title in zip(cell_list, titles): cell.value = title worksheet.update_cells(cell_list) print("Link to the spreadsheet: " "https://docs.google.com/spreadsheets/d/{}/edit".format(sh.id)) ###Output _____no_output_____ ###Markdown Run the next cell to load your ratings and add them to the main `ratings` DataFrame. ###Code # @title Run to load your ratings. # Load the ratings from the spreadsheet and create a DataFrame. 
# Load the ratings from the spreadsheet and create a DataFrame.
if USER_RATINGS:
  my_ratings = pd.DataFrame.from_records(worksheet.get_all_values()).reset_index()
  my_ratings = my_ratings[my_ratings[1] != '']
  # Spreadsheet row i (0-based) corresponds to movie_id i.
  my_ratings = pd.DataFrame({
      'user_id': "943",
      'movie_id': list(map(str, my_ratings['index'])),
      'rating': list(map(float, my_ratings[1])),
  })
  # Remove previous ratings.
  ratings = ratings[ratings.user_id != "943"]
  # Add new ratings.
  ratings = ratings.append(my_ratings, ignore_index=True)
  # Add new user to the users DataFrame.
  if users.shape[0] == 943:
    users = users.append(users.iloc[942], ignore_index=True)
    users["user_id"][943] = "943"
  print("Added your %d ratings; you have great taste!" % len(my_ratings))
ratings[ratings.user_id=="943"].merge(movies[['movie_id', 'title']])

# @title CFModel helper class (run this cell)
class CFModel(object):
  """Simple class that represents a collaborative filtering model."""

  def __init__(self, embedding_vars, loss, metrics=None):
    """Initializes a CFModel.

    Args:
      embedding_vars: A dictionary of tf.Variables.
      loss: A float Tensor. The loss to optimize.
      metrics: optional list of dictionaries of Tensors. The metrics in each
        dictionary will be plotted in a separate figure during training.
    """
    self._embedding_vars = embedding_vars
    self._loss = loss
    self._metrics = metrics
    # Numpy snapshots of the embeddings, filled in at the end of train().
    self._embeddings = {k: None for k in embedding_vars}
    self._session = None

  @property
  def embeddings(self):
    """The embeddings dictionary."""
    return self._embeddings

  def train(self, num_iterations=100, learning_rate=1.0, plot_results=True,
            optimizer=tf.train.GradientDescentOptimizer):
    """Trains the model.

    Args:
      num_iterations: number of iterations to run.
      learning_rate: optimizer learning rate.
      plot_results: whether to plot the results at the end of training.
      optimizer: the optimizer to use. Default to GradientDescentOptimizer.
    Returns:
      The metrics dictionary evaluated at the last iteration.
    """
    with self._loss.graph.as_default():
      opt = optimizer(learning_rate)
      train_op = opt.minimize(self._loss)
      local_init_op = tf.group(
          tf.variables_initializer(opt.variables()),
          tf.local_variables_initializer())
      if self._session is None:
        # Lazily create the session once, so that calling train() again
        # resumes training from the current embedding values.
        self._session = tf.Session()
        with self._session.as_default():
          self._session.run(tf.global_variables_initializer())
          self._session.run(tf.tables_initializer())
          tf.train.start_queue_runners()

    with self._session.as_default():
      local_init_op.run()
      iterations = []
      metrics = self._metrics or ({},)
      # BUG FIX: iterate over `metrics` (never None) instead of
      # `self._metrics`, which is None when no metrics were supplied and
      # would raise a TypeError here, despite the docstring declaring the
      # argument optional.
      metrics_vals = [collections.defaultdict(list) for _ in metrics]

      # Train and append results.
      for i in range(num_iterations + 1):
        _, results = self._session.run((train_op, metrics))
        if (i % 10 == 0) or i == num_iterations:
          print("\r iteration %d: " % i + ", ".join(
                ["%s=%f" % (k, v) for r in results for k, v in r.items()]),
                end='')
          iterations.append(i)
          for metric_val, result in zip(metrics_vals, results):
            for k, v in result.items():
              metric_val[k].append(v)

      # Snapshot the trained embeddings as numpy arrays.
      for k, v in self._embedding_vars.items():
        self._embeddings[k] = v.eval()

      if plot_results:
        # Plot the metrics: one subplot per metrics dictionary.
        num_subplots = len(metrics)+1
        fig = plt.figure()
        fig.set_size_inches(num_subplots*10, 8)
        for i, metric_vals in enumerate(metrics_vals):
          ax = fig.add_subplot(1, num_subplots, i+1)
          for k, v in metric_vals.items():
            ax.plot(iterations, v, label=k)
          ax.set_xlim([1, num_iterations])
          ax.legend()
      return results
init_stddev: float, the standard deviation of the random initial embeddings. Returns: model: a CFModel. """ # Split the ratings DataFrame into train and test. train_ratings, test_ratings = split_dataframe(ratings) # SparseTensor representation of the train and test datasets. A_train = build_rating_sparse_tensor(train_ratings) A_test = build_rating_sparse_tensor(test_ratings) # Initialize the embeddings using a normal distribution. U = tf.Variable(tf.random_normal( [A_train.dense_shape[0], embedding_dim], stddev=init_stddev)) V = tf.Variable(tf.random_normal( [A_train.dense_shape[1], embedding_dim], stddev=init_stddev)) train_loss = sparse_mean_square_error(A_train, U, V) test_loss = sparse_mean_square_error(A_test, U, V) metrics = { 'train_error': train_loss, 'test_error': test_loss } embeddings = { "user_id": U, "movie_id": V } return CFModel(embeddings, train_loss, [metrics]) ###Output _____no_output_____ ###Markdown Great, now it's time to train the model!Go ahead and run the next cell, trying different parameters (embedding dimension, learning rate, iterations). The training and test errors are plotted at the end of training. You can inspect these values to validate the hyper-parameters.Note: by calling `model.train` again, the model will continue training starting from the current values of the embeddings. ###Code # Build the CF model and train it. model = build_model(ratings, embedding_dim=30, init_stddev=0.5) model.train(num_iterations=1000, learning_rate=10.) ###Output _____no_output_____ ###Markdown The movie and user embeddings are also displayed in the right figure. When the embedding dimension is greater than 3, the embeddings are projected on the first 3 dimensions. The next section will have a more detailed look at the embeddings. IV. 
#@title Solution
DOT = 'dot'
COSINE = 'cosine'
def compute_scores(query_embedding, item_embeddings, measure=DOT):
  """Computes the scores of the candidates given a query.

  Args:
    query_embedding: a vector of shape [k], representing the query embedding.
    item_embeddings: a matrix of shape [N, k], such that row i is the embedding
      of item i.
    measure: a string specifying the similarity measure to be used. Can be
      either DOT or COSINE.
  Returns:
    scores: a vector of shape [N], such that scores[i] is the score of item i.
  """
  # Dot-product score of every item against the query.
  scores = item_embeddings.dot(query_embedding)
  if measure == COSINE:
    # Cosine similarity is the dot product of the L2-normalized vectors,
    # i.e. the dot product divided by the product of the norms.
    scores = scores / (
        np.linalg.norm(query_embedding) *
        np.linalg.norm(item_embeddings, axis=1))
  return scores
# @title Embedding Visualization code (run this cell)
def movie_embedding_norm(models):
  """Visualizes the norm and number of ratings of the movie embeddings.

  Args:
    models: A CFModel object, or a list of CFModel objects whose embedding
      norms are plotted side by side in linked charts.
  """
  if not isinstance(models, list):
    models = [models]
  df = pd.DataFrame({
      'title': movies['title'],
      'genre': movies['genre'],
      'num_ratings': movies_ratings['rating count'],
  })
  charts = []
  # Interval brush shared across all charts so a selection highlights the
  # same movies in every model's plot.
  brush = alt.selection_interval()
  for i, model in enumerate(models):
    norm_key = 'norm'+str(i)
    # L2 norm of each movie's embedding for this model.
    df[norm_key] = np.linalg.norm(model.embeddings["movie_id"], axis=1)
    nearest = alt.selection(
        type='single', encodings=['x', 'y'], on='mouseover', nearest=True,
        empty='none')
    base = alt.Chart().mark_circle().encode(
        x='num_ratings',
        y=norm_key,
        color=alt.condition(brush, alt.value('#4c78a8'), alt.value('lightgray'))
    ).properties(
        selection=nearest).add_selection(brush)
    # Show the title of the movie nearest to the mouse pointer.
    text = alt.Chart().mark_text(align='center', dx=5, dy=-5).encode(
        x='num_ratings',
        y=norm_key,
        text=alt.condition(nearest, 'title', alt.value('')))
    charts.append(alt.layer(base, text))
  return alt.hconcat(*charts, data=df)

def visualize_movie_embeddings(data, x, y):
  # Scatter plot of movies at columns (x, y) of `data`, colored by genre.
  # NOTE(review): relies on module-level altair objects `genre_filter` and
  # `genre_chart` defined elsewhere in the notebook.
  nearest = alt.selection(
      type='single', encodings=['x', 'y'], on='mouseover', nearest=True,
      empty='none')
  base = alt.Chart().mark_circle().encode(
      x=x,
      y=y,
      color=alt.condition(genre_filter, "genre", alt.value("whitesmoke")),
  ).properties(
      width=600,
      height=600,
      selection=nearest)
  text = alt.Chart().mark_text(align='left', dx=5, dy=-5).encode(
      x=x,
      y=y,
      text=alt.condition(nearest, 'title', alt.value('')))
  return alt.hconcat(alt.layer(base, text), genre_chart, data=data)

def tsne_movie_embeddings(model):
  """Visualizes the movie embeddings, projected using t-SNE with Cosine measure.

  Args:
    model: A CFModel object.
  """
  tsne = sklearn.manifold.TSNE(
      n_components=2, perplexity=40, metric='cosine', early_exaggeration=10.0,
      init='pca', verbose=True, n_iter=400)
  print('Running t-SNE...')
  V_proj = tsne.fit_transform(model.embeddings["movie_id"])
  # NOTE(review): writes the 2-D projection onto the module-level `movies`
  # DataFrame as columns 'x' and 'y' (side effect).
  movies.loc[:,'x'] = V_proj[:, 0]
  movies.loc[:,'y'] = V_proj[:, 1]
  return visualize_movie_embeddings(movies, 'x', 'y')

movie_embedding_norm(model)
For more information on using t-SNE, see [How to Use t-SNE Effectively](https://distill.pub/2016/misread-tsne/). ###Code tsne_movie_embeddings(model_lowinit) ###Output _____no_output_____ ###Markdown You can highlight the embeddings of a given genre by clicking on the genres panel (SHIFT+click to select multiple genres).We can observe that the embeddings do not seem to have any notable structure, and the embeddings of a given genre are located all over the embedding space. This confirms the poor quality of the learned embeddings. One of the main reasons, which we will address in the next section, is that we only trained the model on observed pairs, and without regularization. V. Regularization In Matrix FactorizationIn the previous section, our loss was defined as the mean squared error on the observed part of the rating matrix. As discussed in the lecture, this can be problematic as the model does not learn how to place the embeddings of irrelevant movies. This phenomenon is known as *folding*.We will add regularization terms that will address this issue. We will use two types of regularization:- Regularization of the model parameters. This is a common $\ell_2$ regularization term on the embedding matrices, given by $r(U, V) = \frac{1}{N} \sum_i \|U_i\|^2 + \frac{1}{M}\sum_j \|V_j\|^2$.- A global prior that pushes the prediction of any pair towards zero, called the *gravity* term. This is given by $g(U, V) = \frac{1}{MN} \sum_{i = 1}^N \sum_{j = 1}^M \langle U_i, V_j \rangle^2$.The total loss is then given by$$\frac{1}{|\Omega|}\sum_{(i, j) \in \Omega} (A_{ij} - \langle U_i, V_j\rangle)^2 + \lambda _r r(U, V) + \lambda_g g(U, V)$$where $\lambda_r$ and $\lambda_g$ are two regularization coefficients (hyper-parameters). Exercise 6: Build a regularized Matrix Factorization model and train itWrite a function that builds a regularized model. You are given a function `gravity(U, V)` that computes the gravity term given the two embedding matrices $U$ and $V$. 
def gravity(U, V):
  """Creates a gravity loss given two embedding matrices."""
  # Uses the identity sum_{i,j} <U_i, V_j>^2 = sum((U^T U) * (V^T V)),
  # which avoids materializing the full N x M prediction matrix.
  # NOTE: the original export garbled this statement across a cell boundary
  # ("return 1." / "/ (U.shape..."); reconstructed here as one expression.
  return 1. / (U.shape[0].value * V.shape[0].value) * tf.reduce_sum(
      tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True))

def build_regularized_model(
    ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1.,
    init_stddev=0.1):
  """Builds a regularized matrix factorization model.

  Args:
    ratings: the DataFrame of movie ratings.
    embedding_dim: The dimension of the embedding space.
    regularization_coeff: The regularization coefficient lambda.
    gravity_coeff: The gravity regularization coefficient lambda_g.
    init_stddev: float, the standard deviation of the random initial
      embeddings.
  Returns:
    A CFModel object that uses a regularized loss.
  """
  # Split the ratings DataFrame into train and test.
  train_ratings, test_ratings = split_dataframe(ratings)
  # SparseTensor representation of the train and test datasets.
  A_train = build_rating_sparse_tensor(train_ratings)
  A_test = build_rating_sparse_tensor(test_ratings)
  U = tf.Variable(tf.random_normal(
      [A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
  V = tf.Variable(tf.random_normal(
      [A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
  # Observed MSE on the train/test splits.
  error_train = sparse_mean_square_error(A_train, U, V)
  error_test = sparse_mean_square_error(A_test, U, V)
  # Gravity term: pushes all predictions (observed or not) towards zero.
  gravity_loss = gravity_coeff * gravity(U, V)
  # Standard L2 regularization of the embedding matrices.
  regularization_loss = regularization_coeff * (
      tf.reduce_sum(U*U)/U.shape[0].value + tf.reduce_sum(V*V)/V.shape[0].value)
  total_loss = error_train + regularization_loss + gravity_loss
  losses = {
      'train_error_observed': error_train,
      'test_error_observed': error_test,
  }
  loss_components = {
      'observed_loss': error_train,
      'regularization_loss': regularization_loss,
      'gravity_loss': gravity_loss,
  }
  embeddings = {"user_id": U, "movie_id": V}
  return CFModel(embeddings, total_loss, [losses, loss_components])
###Code reg_model = build_regularized_model( ratings, regularization_coeff=0.1, gravity_coeff=1.0, embedding_dim=35, init_stddev=.05) reg_model.train(num_iterations=2000, learning_rate=20.) ###Output _____no_output_____ ###Markdown Observe that adding the regularization terms results in a higher MSE, both on the training and test set. However, as we will see, the quality of the recommendations improves. This highlights a tension between fitting the observed data and minimizing the regularization terms. Fitting the observed data often emphasizes learning high similarity (between items with many interactions), but a good embedding representation also requires learning low similarity (between items with few or no interactions). Inspect the resultsLet's see if the results with regularization look better. ###Code user_recommendations(reg_model, DOT, exclude_rated=True, k=10) ###Output _____no_output_____ ###Markdown Hopefully, these recommendations look better. You can change the similarity measure from COSINE to DOT and observe how this affects the recommendations.Since the model is likely to recommend items that you rated highly, you have the option to exclude the items you rated, using `exclude_rated=True`.In the following cells, we display the nearest neighbors, the embedding norms, and the t-SNE projection of the movie embeddings. ###Code movie_neighbors(reg_model, "Aladdin", DOT) movie_neighbors(reg_model, "Aladdin", COSINE) ###Output _____no_output_____ ###Markdown Here we compare the embedding norms for `model` and `reg_model`. Selecting a subset of the embeddings will highlight them on both charts simultaneously. ###Code movie_embedding_norm([model, model_lowinit, reg_model]) # Visualize the embeddings tsne_movie_embeddings(reg_model) ###Output _____no_output_____ ###Markdown We should observe that the embeddings have a lot more structure than the unregularized case. 
# Group each user's rated movie ids into one list per user.
rated_movies = (ratings[["user_id", "movie_id"]]
                .groupby("user_id", as_index=False)
                .aggregate(lambda x: list(x)))
rated_movies.head()

#@title Batch generation code (run this cell)
# Lookup tables from movie_id to release year and to the list of genres.
years_dict = {
    movie: year
    for movie, year in zip(movies["movie_id"], movies["year"])
}
genres_dict = {
    movie: genres.split('-')
    for movie, genres in zip(movies["movie_id"], movies["all_genres"])
}

def make_batch(ratings, batch_size):
  """Creates a batch of examples.

  Args:
    ratings: A DataFrame of ratings such that examples["movie_id"] is a list of
      movies rated by a user.
    batch_size: The batch size.
  """
  def pad(x, fill):
    # Right-pads the ragged per-user lists into a rectangular array.
    return pd.DataFrame.from_dict(x).fillna(fill).values

  movie = []
  year = []
  genre = []
  label = []
  for movie_ids in ratings["movie_id"].values:
    movie.append(movie_ids)
    genre.append([x for movie_id in movie_ids for x in genres_dict[movie_id]])
    year.append([years_dict[movie_id] for movie_id in movie_ids])
    label.append([int(movie_id) for movie_id in movie_ids])
  features = {
      "movie_id": pad(movie, ""),
      "year": pad(year, ""),
      "genre": pad(genre, ""),
      # -1 marks padding; select_random below only samples entries >= 0.
      "label": pad(label, -1)
  }
  batch = (
      tf.data.Dataset.from_tensor_slices(features)
      .shuffle(1000)
      .repeat()
      .batch(batch_size)
      .make_one_shot_iterator()
      .get_next())
  return batch

def select_random(x):
  """Selects a random element from each row of x."""
  def to_float(x):
    return tf.cast(x, tf.float32)
  def to_int(x):
    return tf.cast(x, tf.int64)
  batch_size = tf.shape(x)[0]
  rn = tf.range(batch_size)
  # Number of non-padding entries in each row (padding is -1).
  nnz = to_float(tf.count_nonzero(x >= 0, axis=1))
  rnd = tf.random_uniform([batch_size])
  # A uniformly random column index in [0, nnz) for each row.
  ids = tf.stack([to_int(rn), to_int(nnz * rnd)], axis=1)
  return to_int(tf.gather_nd(x, ids))
# @title Solution
def softmax_loss(user_embeddings, movie_embeddings, labels):
  """Returns the cross-entropy loss of the softmax model.

  Args:
    user_embeddings: A tensor of shape [batch_size, embedding_dim].
    movie_embeddings: A tensor of shape [num_movies, embedding_dim].
    labels: A tensor of [batch_size], such that labels[i] is the target label
      for example i.
  Returns:
    The mean cross-entropy loss.
  """
  # The two embedding tables must share an embedding dimension, otherwise
  # the logits matmul below is ill-defined.
  query_dim = user_embeddings.shape[1].value
  item_dim = movie_embeddings.shape[1].value
  if query_dim != item_dim:
    raise ValueError(
        "The user embedding dimension %d should match the movie embedding "
        "dimension % d" % (query_dim, item_dim))
  # Score every movie for every example: shape [batch_size, num_movies].
  logits = tf.matmul(user_embeddings, movie_embeddings, transpose_b=True)
  per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels)
  return tf.reduce_mean(per_example_loss)
# @title Solution
def build_softmax_model(rated_movies, embedding_cols, hidden_dims):
  """Builds a Softmax model for MovieLens.

  Args:
    rated_movies: DataFrame of training examples.
    embedding_cols: A dictionary mapping feature names (string) to embedding
      column objects. This will be used in tf.feature_column.input_layer()
      to create the input layer.
    hidden_dims: int list of the dimensions of the hidden layers.
  Returns:
    A CFModel object.
  """
  def create_network(features):
    """Maps input features dictionary to user embeddings.

    Args:
      features: A dictionary of input string tensors.
    Returns:
      outputs: A tensor of shape [batch_size, embedding_dim].
    """
    # Create a bag-of-words embedding for each sparse feature.
    inputs = tf.feature_column.input_layer(features, embedding_cols)
    # Stack the hidden layers; the last one produces the user embedding.
    input_dim = inputs.shape[1].value
    for layer_index, output_dim in enumerate(hidden_dims):
      weight = tf.get_variable(
          "hidden%d_w_" % layer_index, shape=[input_dim, output_dim],
          initializer=tf.truncated_normal_initializer(
              stddev=1./np.sqrt(output_dim))) / 10.
      outputs = tf.matmul(inputs, weight)
      input_dim = output_dim
      inputs = outputs
    return outputs

  train_rated_movies, test_rated_movies = split_dataframe(rated_movies)
  train_batch = make_batch(train_rated_movies, 200)
  test_batch = make_batch(test_rated_movies, 100)

  # Build train and test towers sharing the same weights ("model" scope).
  with tf.variable_scope("model", reuse=False):
    # Train
    train_user_embeddings = create_network(train_batch)
    train_labels = select_random(train_batch["label"])
  with tf.variable_scope("model", reuse=True):
    # Test
    test_user_embeddings = create_network(test_batch)
    test_labels = select_random(test_batch["label"])
    # The movie embedding table doubles as the softmax weight matrix.
    movie_embeddings = tf.get_variable(
        "input_layer/movie_id_embedding/embedding_weights")

  train_loss = softmax_loss(
      train_user_embeddings, movie_embeddings, train_labels)
  test_loss = softmax_loss(
      test_user_embeddings, movie_embeddings, test_labels)
  _, test_precision_at_10 = tf.metrics.precision_at_k(
      labels=test_labels,
      predictions=tf.matmul(test_user_embeddings, movie_embeddings,
                            transpose_b=True),
      k=10)

  metrics = (
      {"train_loss": train_loss, "test_loss": test_loss},
      {"test_precision_at_10": test_precision_at_10}
  )
  return CFModel({"movie_id": movie_embeddings}, train_loss, metrics)
You can set the following hyperparameters:- learning rate- number of iterations. Note: you can run `softmax_model.train()` again to continue training the model from its current state.- input embedding dimensions (the `input_dims` argument)- number of hidden layers and size of each layer (the `hidden_dims` argument)Note: since our input features are string-valued (movie_id, genre, and year), we need to map them to integer ids. This is done using [`tf.feature_column.categorical_column_with_vocabulary_list`](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_list), which takes a vocabulary list specifying all the values the feature can take. Then each id is mapped to an embedding vector using [`tf.feature_column.embedding_column`](https://www.tensorflow.org/api_docs/python/tf/feature_column/embedding_column). ###Code
# Create feature embedding columns
def make_embedding_col(key, embedding_dim):
  """Builds an embedding feature column for the categorical feature `key`.

  The vocabulary is taken from the distinct values of `movies[key]`, so every
  observed value maps to its own id (no OOV buckets).

  Args:
    key: string name of a column of the global `movies` DataFrame
      (e.g. "movie_id", "genre", "year").
    embedding_dim: int, dimension of the learned embedding vectors.
  Returns:
    A `tf.feature_column` embedding column; multi-valued inputs are averaged
    (combiner='mean'), giving a bag-of-words style representation.
  """
  categorical_col = tf.feature_column.categorical_column_with_vocabulary_list(
      key=key, vocabulary_list=list(set(movies[key].values)), num_oov_buckets=0)
  return tf.feature_column.embedding_column(
      categorical_column=categorical_col, dimension=embedding_dim,
      # default initializer: truncated normal with stddev=1/sqrt(dimension)
      combiner='mean')

# Build the softmax model in a fresh graph and train it.
with tf.Graph().as_default():
  softmax_model = build_softmax_model(
      rated_movies,
      embedding_cols=[
          make_embedding_col("movie_id", 35),
          make_embedding_col("genre", 3),
          make_embedding_col("year", 2),
      ],
      hidden_dims=[35])

softmax_model.train(
    learning_rate=8., num_iterations=3000, optimizer=tf.train.AdagradOptimizer)
###Output _____no_output_____ ###Markdown Inspect the embeddingsWe can inspect the movie embeddings as we did for the previous models. Note that in this case, the movie embeddings are used at the same time as input embeddings (for the bag of words representation of the user history), and as softmax weights.
###Code movie_neighbors(softmax_model, "Aladdin", DOT) movie_neighbors(softmax_model, "Aladdin", COSINE) movie_embedding_norm([reg_model, softmax_model]) tsne_movie_embeddings(softmax_model) ###Output _____no_output_____ ###Markdown Copyright 2018 Google LLC. ###Code # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown Recommendation Systems with TensorFlowThis Colab notebook complements the course on [Recommendation Systems](https://developers.google.com/machine-learning/recommendation/). Specifically, we'll be using matrix factorization to learn user and movie embeddings. IntroductionWe will create a movie recommendation system based on the [MovieLens](https://movielens.org/) dataset available [here](http://grouplens.org/datasets/movielens/). The data consists of movies ratings (on a scale of 1 to 5). Outline 1. Exploring the MovieLens Data (10 minutes) 1. Preliminaries (25 minutes) 1. Training a matrix factorization model (15 minutes) 1. Inspecting the Embeddings (15 minutes) 1. Regularization in matrix factorization (15 minutes) 1. Softmax model training (30 minutes) SetupLet's get started by importing the required packages. 
###Code # @title Imports (run this cell) from __future__ import print_function import numpy as np import pandas as pd import collections from mpl_toolkits.mplot3d import Axes3D from IPython import display from matplotlib import pyplot as plt import sklearn import sklearn.manifold import tensorflow as tf tf.logging.set_verbosity(tf.logging.ERROR) # Add some convenience functions to Pandas DataFrame. pd.options.display.max_rows = 10 pd.options.display.float_format = '{:.3f}'.format def mask(df, key, function): """Returns a filtered dataframe, by applying function to key""" return df[function(df[key])] def flatten_cols(df): df.columns = [' '.join(col).strip() for col in df.columns.values] return df pd.DataFrame.mask = mask pd.DataFrame.flatten_cols = flatten_cols # Install Altair and activate its colab renderer. print("Installing Altair...") !pip install git+git://github.com/altair-viz/altair.git import altair as alt alt.data_transformers.enable('default', max_rows=None) alt.renderers.enable('colab') print("Done installing Altair.") # Install spreadsheets and import authentication module. USER_RATINGS = False !pip install --upgrade -q gspread from google.colab import auth import gspread from oauth2client.client import GoogleCredentials ###Output _____no_output_____ ###Markdown We then download the MovieLens Data, and create DataFrames containing movies, users, and ratings. ###Code # @title Load the MovieLens data (run this cell). # Download MovieLens data. print("Downloading movielens data...") import urllib import zipfile urllib.urlretrieve("http://files.grouplens.org/datasets/movielens/ml-100k.zip", "movielens.zip") zip_ref = zipfile.ZipFile('movielens.zip', "r") zip_ref.extractall() print("Done. Dataset contains:") print(zip_ref.read('ml-100k/u.info')) # Load each data set (users, movies, and ratings). 
users_cols = ['user_id', 'age', 'sex', 'occupation', 'zip_code'] users = pd.read_csv( 'ml-100k/u.user', sep='|', names=users_cols, encoding='latin-1') ratings_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp'] ratings = pd.read_csv( 'ml-100k/u.data', sep='\t', names=ratings_cols, encoding='latin-1') # The movies file contains a binary feature for each genre. genre_cols = [ "genre_unknown", "Action", "Adventure", "Animation", "Children", "Comedy", "Crime", "Documentary", "Drama", "Fantasy", "Film-Noir", "Horror", "Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western" ] movies_cols = [ 'movie_id', 'title', 'release_date', "video_release_date", "imdb_url" ] + genre_cols movies = pd.read_csv( 'ml-100k/u.item', sep='|', names=movies_cols, encoding='latin-1') # Since the ids start at 1, we shift them to start at 0. users["user_id"] = users["user_id"].apply(lambda x: str(x-1)) movies["movie_id"] = movies["movie_id"].apply(lambda x: str(x-1)) movies["year"] = movies['release_date'].apply(lambda x: str(x).split('-')[-1]) ratings["movie_id"] = ratings["movie_id"].apply(lambda x: str(x-1)) ratings["user_id"] = ratings["user_id"].apply(lambda x: str(x-1)) ratings["rating"] = ratings["rating"].apply(lambda x: float(x)) # Compute the number of movies to which a genre is assigned. genre_occurences = movies[genre_cols].sum().to_dict() # Since some movies can belong to more than one genre, we create different # 'genre' columns as follows: # - all_genres: all the active genres of the movie. # - genre: randomly sampled from the active genres. 
def mark_genres(movies, genres): def get_random_genre(gs): active = [genre for genre, g in zip(genres, gs) if g==1] if len(active) == 0: return 'Other' return np.random.choice(active) def get_all_genres(gs): active = [genre for genre, g in zip(genres, gs) if g==1] if len(active) == 0: return 'Other' return '-'.join(active) movies['genre'] = [ get_random_genre(gs) for gs in zip(*[movies[genre] for genre in genres])] movies['all_genres'] = [ get_all_genres(gs) for gs in zip(*[movies[genre] for genre in genres])] mark_genres(movies, genre_cols) # Create one merged DataFrame containing all the movielens data. movielens = ratings.merge(movies, on='movie_id').merge(users, on='user_id') # Utility to split the data into training and test sets. def split_dataframe(df, holdout_fraction=0.1): """Splits a DataFrame into training and test sets. Args: df: a dataframe. holdout_fraction: fraction of dataframe rows to use in the test set. Returns: train: dataframe for training test: dataframe for testing """ test = df.sample(frac=holdout_fraction, replace=False) train = df[~df.index.isin(test.index)] return train, test ###Output _____no_output_____ ###Markdown I. Exploring the Movielens DataBefore we dive into model building, let's inspect our MovieLens dataset. It is usually helpful to understand the statistics of the dataset. UsersWe start by printing some basic statistics describing the numeric user features. ###Code users.describe() ###Output _____no_output_____ ###Markdown We can also print some basic statistics describing the categorical user features ###Code users.describe(include=[np.object]) ###Output _____no_output_____ ###Markdown We can also create histograms to further understand the distribution of the users. We use Altair to create an interactive chart. ###Code # @title Altair visualization code (run this cell) # The following functions are used to generate interactive Altair charts. # We will display histograms of the data, sliced by a given attribute. 
# Create filters to be used to slice the data. occupation_filter = alt.selection_multi(fields=["occupation"]) occupation_chart = alt.Chart().mark_bar().encode( x="count()", y=alt.Y("occupation:N"), color=alt.condition( occupation_filter, alt.Color("occupation:N", scale=alt.Scale(scheme='category20')), alt.value("lightgray")), ).properties(width=300, height=300, selection=occupation_filter) # A function that generates a histogram of filtered data. def filtered_hist(field, label, filter): """Creates a layered chart of histograms. The first layer (light gray) contains the histogram of the full data, and the second contains the histogram of the filtered data. Args: field: the field for which to generate the histogram. label: String label of the histogram. filter: an alt.Selection object to be used to filter the data. """ base = alt.Chart().mark_bar().encode( x=alt.X(field, bin=alt.Bin(maxbins=10), title=label), y="count()", ).properties( width=300, ) return alt.layer( base.transform_filter(filter), base.encode(color=alt.value('lightgray'), opacity=alt.value(.7)), ).resolve_scale(y='independent') ###Output _____no_output_____ ###Markdown Next, we look at the distribution of ratings per user. Clicking on an occupation in the right chart will filter the data by that occupation. The corresponding histogram is shown in blue, and superimposed with the histogram for the whole data (in light gray). You can use SHIFT+click to select multiple subsets.What do you observe, and how might this affect the recommendations? ###Code users_ratings = ( ratings .groupby('user_id', as_index=False) .agg({'rating': ['count', 'mean']}) .flatten_cols() .merge(users, on='user_id') ) # Create a chart for the count, and one for the mean. 
alt.hconcat( filtered_hist('rating count', '# ratings / user', occupation_filter), filtered_hist('rating mean', 'mean user rating', occupation_filter), occupation_chart, data=users_ratings) ###Output _____no_output_____ ###Markdown MoviesIt is also useful to look at information about the movies and their ratings. ###Code movies_ratings = movies.merge( ratings .groupby('movie_id', as_index=False) .agg({'rating': ['count', 'mean']}) .flatten_cols(), on='movie_id') genre_filter = alt.selection_multi(fields=['genre']) genre_chart = alt.Chart().mark_bar().encode( x="count()", y=alt.Y('genre'), color=alt.condition( genre_filter, alt.Color("genre:N"), alt.value('lightgray')) ).properties(height=300, selection=genre_filter) (movies_ratings[['title', 'rating count', 'rating mean']] .sort_values('rating count', ascending=False) .head(10)) (movies_ratings[['title', 'rating count', 'rating mean']] .mask('rating count', lambda x: x > 20) .sort_values('rating mean', ascending=False) .head(10)) ###Output _____no_output_____ ###Markdown Finally, the last chart shows the distribution of the number of ratings and average rating. ###Code # Display the number of ratings and average rating per movie. alt.hconcat( filtered_hist('rating count', '# ratings / movie', genre_filter), filtered_hist('rating mean', 'mean movie rating', genre_filter), genre_chart, data=movies_ratings) ###Output _____no_output_____ ###Markdown II. 
PreliminariesOur goal is to factorize the ratings matrix $A$ into the product of a user embedding matrix $U$ and movie embedding matrix $V$, such that $A \approx UV^\top$ with$U = \begin{bmatrix} u_{1} \\ \hline \vdots \\ \hline u_{N} \end{bmatrix}$ and$V = \begin{bmatrix} v_{1} \\ \hline \vdots \\ \hline v_{M} \end{bmatrix}$.Here- $N$ is the number of users,- $M$ is the number of movies,- $A_{ij}$ is the rating of the $j$th movies by the $i$th user,- each row $U_i$ is a $d$-dimensional vector (embedding) representing user $i$,- each rwo $V_j$ is a $d$-dimensional vector (embedding) representing movie $j$,- the prediction of the model for the $(i, j)$ pair is the dot product $\langle U_i, V_j \rangle$. Sparse Representation of the Rating MatrixThe rating matrix could be very large and, in general, most of the entries are unobserved, since a given user will only rate a small subset of movies. For effcient representation, we will use a [tf.SparseTensor](https://www.tensorflow.org/api_docs/python/tf/SparseTensor). A `SparseTensor` uses three tensors to represent the matrix: `tf.SparseTensor(indices, values, dense_shape)` represents a tensor, where a value $A_{ij} = a$ is encoded by setting `indices[k] = [i, j]` and `values[k] = a`. The last tensor `dense_shape` is used to specify the shape of the full underlying matrix. Toy exampleAssume we have $2$ users and $4$ movies. 
Our toy ratings dataframe has three ratings,user\_id | movie\_id | rating--:|--:|--:0 | 0 | 5.00 | 1 | 3.01 | 3 | 1.0The corresponding rating matrix is$$A =\begin{bmatrix}5.0 & 3.0 & 0 & 0 \\0 & 0 & 0 & 1.0\end{bmatrix}$$And the SparseTensor representation is,```pythonSparseTensor( indices=[[0, 0], [0, 1], [1,3]], values=[5.0, 3.0, 1.0], dense_shape=[2, 4])``` Exercise 1: Build a tf.SparseTensor representation of the Rating Matrix.In this exercise, we'll write a function that maps from our `ratings` DataFrame to a `tf.SparseTensor`.Hint: you can select the values of a given column of a Dataframe `df` using `df['column_name'].values`. ###Code def build_rating_sparse_tensor(ratings_df): """ Args: ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns. Returns: A tf.SparseTensor representing the ratings matrix. """ # ========================= Complete this section ============================ # indices = # values = # ============================================================================ return tf.SparseTensor( indices=indices, values=values, dense_shape=[users.shape[0], movies.shape[0]]) #@title Solution def build_rating_sparse_tensor(ratings_df): """ Args: ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns. Returns: a tf.SparseTensor representing the ratings matrix. """ indices = ratings_df[['user_id', 'movie_id']].values values = ratings_df['rating'].values return tf.SparseTensor( indices=indices, values=values, dense_shape=[users.shape[0], movies.shape[0]]) ###Output _____no_output_____ ###Markdown Calculating the errorThe model approximates the ratings matrix $A$ by a low-rank product $UV^\top$. We need a way to measure the approximation error. We'll start by using the Mean Squared Error of observed entries only (we will revisit this later). 
It is defined as$$\begin{align*}\text{MSE}(A, UV^\top)&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - (UV^\top)_{ij})^2} \\&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - \langle U_i, V_j\rangle)^2}\end{align*}$$where $\Omega$ is the set of observed ratings, and $|\Omega|$ is the cardinality of $\Omega$. Exercise 2: Mean Squared ErrorWrite a TensorFlow function that takes a sparse rating matrix $A$ and the two embedding matrices $U, V$ and returns the mean squared error $\text{MSE}(A, UV^\top)$.Hints: * in this section, we only consider observed entries when calculating the loss. * a `SparseTensor` `sp_x` is a tuple of three Tensors: `sp_x.indices`, `sp_x.values` and `sp_x.dense_shape`. * you may find [`tf.gather_nd`](https://www.tensorflow.org/api_docs/python/tf/gather_nd) and [`tf.losses.mean_squared_error`](https://www.tensorflow.org/api_docs/python/tf/losses/mean_squared_error) helpful. ###Code def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings): """ Args: sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M] user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding dimension, such that U_i is the embedding of user i. movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding dimension, such that V_j is the embedding of movie j. Returns: A scalar Tensor representing the MSE between the true ratings and the model's predictions. """ # ========================= Complete this section ============================ # loss = # ============================================================================ return loss #@title Solution def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings): """ Args: sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M] user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding dimension, such that U_i is the embedding of user i. 
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding dimension, such that V_j is the embedding of movie j. Returns: A scalar Tensor representing the MSE between the true ratings and the model's predictions. """ predictions = tf.gather_nd( tf.matmul(user_embeddings, movie_embeddings, transpose_b=True), sparse_ratings.indices) loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions) return loss ###Output _____no_output_____ ###Markdown Note: One approach is to compute the full prediction matrix $UV^\top$, then gather the entries corresponding to the observed pairs. The memory cost of this approach is $O(NM)$. For the MovieLens dataset, this is fine, as the dense $N \times M$ matrix is small enough to fit in memory ($N = 943$, $M = 1682$).Another approach (given in the alternate solution below) is to only gather the embeddings of the observed pairs, then compute their dot products. The memory cost is $O(|\Omega| d)$ where $d$ is the embedding dimension. In our case, $|\Omega| = 10^5$, and the embedding dimension is on the order of $10$, so the memory cost of both methods is comparable. But when the number of users or movies is much larger, the first approach becomes infeasible. ###Code #@title Alternate Solution def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings): """ Args: sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M] user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding dimension, such that U_i is the embedding of user i. movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding dimension, such that V_j is the embedding of movie j. Returns: A scalar Tensor representing the MSE between the true ratings and the model's predictions. 
""" predictions = tf.reduce_sum( tf.gather(user_embeddings, sparse_ratings.indices[:, 0]) * tf.gather(movie_embeddings, sparse_ratings.indices[:, 1]), axis=1) loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions) return loss ###Output _____no_output_____ ###Markdown Exercise 3 (Optional): adding your own ratings to the data set You have the option to add your own ratings to the data set. If you choose to do so, you will be able to see recommendations for yourself.Start by checking the box below. Running the next cell will authenticate you to your google Drive account, and create a spreadsheet, that contains all movie titles in column 'A'. Follow the link to the spreadsheet and take 3 minutes to rate some of the movies. Your ratings should be entered in column 'B'. ###Code USER_RATINGS = True #@param {type:"boolean"} # @title Run to create a spreadsheet, then use it to enter your ratings. # Authenticate user. if USER_RATINGS: auth.authenticate_user() gc = gspread.authorize(GoogleCredentials.get_application_default()) # Create the spreadsheet and print a link to it. try: sh = gc.open('MovieLens-test') except(gspread.SpreadsheetNotFound): sh = gc.create('MovieLens-test') worksheet = sh.sheet1 titles = movies['title'].values cell_list = worksheet.range(1, 1, len(titles), 1) for cell, title in zip(cell_list, titles): cell.value = title worksheet.update_cells(cell_list) print("Link to the spreadsheet: " "https://docs.google.com/spreadsheets/d/{}/edit".format(sh.id)) ###Output _____no_output_____ ###Markdown Run the next cell to load your ratings and add them to the main `ratings` DataFrame. ###Code # @title Run to load your ratings. # Load the ratings from the spreadsheet and create a DataFrame. 
if USER_RATINGS: my_ratings = pd.DataFrame.from_records(worksheet.get_all_values()).reset_index() my_ratings = my_ratings[my_ratings[1] != ''] my_ratings = pd.DataFrame({ 'user_id': "943", 'movie_id': map(str, my_ratings['index']), 'rating': map(float, my_ratings[1]), }) # Remove previous ratings. ratings = ratings[ratings.user_id != "943"] # Add new ratings. ratings = ratings.append(my_ratings, ignore_index=True) # Add new user to the users DataFrame. if users.shape[0] == 943: users = users.append(users.iloc[942], ignore_index=True) users["user_id"][943] = "943" print("Added your %d ratings; you have great taste!" % len(my_ratings)) ratings[ratings.user_id=="943"].merge(movies[['movie_id', 'title']]) ###Output _____no_output_____ ###Markdown III. Training a Matrix Factorization model CFModel (Collaborative Filtering Model) helper classThis is a simple class to train a matrix factorization model using stochastic gradient descent.The class constructor takes- the user embeddings U (a `tf.Variable`).- the movie embeddings V, (a `tf.Variable`).- a loss to optimize (a `tf.Tensor`).- an optional list of metrics dictionaries, each mapping a string (the name of the metric) to a tensor. These are evaluated and plotted during training (e.g. training error and test error).After training, one can access the trained embeddings using the `model.embeddings` dictionary.Example usage:```U_var = ...V_var = ...loss = ...model = CFModel(U_var, V_var, loss)model.train(iterations=100, learning_rate=1.0)user_embeddings = model.embeddings['user_id']movie_embeddings = model.embeddings['movie_id']``` ###Code # @title CFModel helper class (run this cell) class CFModel(object): """Simple class that represents a collaborative filtering model""" def __init__(self, embedding_vars, loss, metrics=None): """Initializes a CFModel. Args: embedding_vars: A dictionary of tf.Variables. loss: A float Tensor. The loss to optimize. metrics: optional list of dictionaries of Tensors. 
The metrics in each dictionary will be plotted
        in a separate figure during training.
    """
    self._embedding_vars = embedding_vars
    self._loss = loss
    self._metrics = metrics
    # Trained embedding values (numpy arrays), filled in by train().
    self._embeddings = {k: None for k in embedding_vars}
    # tf.Session is created lazily on the first call to train().
    self._session = None

  @property
  def embeddings(self):
    """The embeddings dictionary."""
    return self._embeddings

  def train(self, num_iterations=100, learning_rate=1.0, plot_results=True,
            optimizer=tf.train.GradientDescentOptimizer):
    """Trains the model.
    Args:
      num_iterations: number of iterations to run.
      learning_rate: optimizer learning rate.
      plot_results: whether to plot the results at the end of training.
      optimizer: the optimizer to use. Default to GradientDescentOptimizer.
    Returns:
      The metrics dictionary evaluated at the last iteration.
    """
    with self._loss.graph.as_default():
      opt = optimizer(learning_rate)
      train_op = opt.minimize(self._loss)
      local_init_op = tf.group(
          tf.variables_initializer(opt.variables()),
          tf.local_variables_initializer())
      # Lazily create the session and run the global initializers the first
      # time train() is called; later calls resume from the current state.
      if self._session is None:
        self._session = tf.Session()
        with self._session.as_default():
          self._session.run(tf.global_variables_initializer())
          self._session.run(tf.tables_initializer())
          tf.train.start_queue_runners()

    with self._session.as_default():
      local_init_op.run()
      iterations = []
      metrics = self._metrics or ({},)
      # NOTE(review): if self._metrics is None, iterating it below raises a
      # TypeError; every caller in this notebook passes metrics — confirm
      # before relying on the default.
      metrics_vals = [collections.defaultdict(list) for _ in self._metrics]

      # Train and append results.
      for i in range(num_iterations + 1):
        _, results = self._session.run((train_op, metrics))
        # Report (and record) metrics every 10 iterations and at the end;
        # '\r' rewrites the progress line in place.
        if (i % 10 == 0) or i == num_iterations:
          print("\r iteration %d: " % i + ", ".join(
                ["%s=%f" % (k, v) for r in results for k, v in r.items()]),
                end='')
          iterations.append(i)
          for metric_val, result in zip(metrics_vals, results):
            for k, v in result.items():
              metric_val[k].append(v)

      # Export the trained embedding variables as numpy arrays.
      for k, v in self._embedding_vars.items():
        self._embeddings[k] = v.eval()

      if plot_results:
        # Plot the metrics. 
num_subplots = len(metrics)+1 fig = plt.figure() fig.set_size_inches(num_subplots*10, 8) for i, metric_vals in enumerate(metrics_vals): ax = fig.add_subplot(1, num_subplots, i+1) for k, v in metric_vals.items(): ax.plot(iterations, v, label=k) ax.set_xlim([1, num_iterations]) ax.legend() return results ###Output _____no_output_____ ###Markdown Exercise 4: Build a Matrix Factorization model and train itUsing your `sparse_mean_square_error` function, write a function that builds a `CFModel` by creating the embedding variables and the train and test losses. ###Code def build_model(ratings, embedding_dim=3, init_stddev=1.): """ Args: ratings: a DataFrame of the ratings embedding_dim: the dimension of the embedding vectors. init_stddev: float, the standard deviation of the random initial embeddings. Returns: model: a CFModel. """ # Split the ratings DataFrame into train and test. train_ratings, test_ratings = split_dataframe(ratings) # SparseTensor representation of the train and test datasets. # ========================= Complete this section ============================ # A_train = # A_test = # ============================================================================ # Initialize the embeddings using a normal distribution. U = tf.Variable(tf.random_normal( [A_train.dense_shape[0], embedding_dim], stddev=init_stddev)) V = tf.Variable(tf.random_normal( [A_train.dense_shape[1], embedding_dim], stddev=init_stddev)) # ========================= Complete this section ============================ # train_loss = # test_loss = # ============================================================================ metrics = { 'train_error': train_loss, 'test_error': test_loss } embeddings = { "user_id": U, "movie_id": V } return CFModel(embeddings, train_loss, [metrics]) #@title Solution def build_model(ratings, embedding_dim=3, init_stddev=1.): """ Args: ratings: a DataFrame of the ratings embedding_dim: the dimension of the embedding vectors. 
init_stddev: float, the standard deviation of the random initial embeddings. Returns: model: a CFModel. """ # Split the ratings DataFrame into train and test. train_ratings, test_ratings = split_dataframe(ratings) # SparseTensor representation of the train and test datasets. A_train = build_rating_sparse_tensor(train_ratings) A_test = build_rating_sparse_tensor(test_ratings) # Initialize the embeddings using a normal distribution. U = tf.Variable(tf.random_normal( [A_train.dense_shape[0], embedding_dim], stddev=init_stddev)) V = tf.Variable(tf.random_normal( [A_train.dense_shape[1], embedding_dim], stddev=init_stddev)) train_loss = sparse_mean_square_error(A_train, U, V) test_loss = sparse_mean_square_error(A_test, U, V) metrics = { 'train_error': train_loss, 'test_error': test_loss } embeddings = { "user_id": U, "movie_id": V } return CFModel(embeddings, train_loss, [metrics]) ###Output _____no_output_____ ###Markdown Great, now it's time to train the model!Go ahead and run the next cell, trying different parameters (embedding dimension, learning rate, iterations). The training and test errors are plotted at the end of training. You can inspect these values to validate the hyper-parameters.Note: by calling `model.train` again, the model will continue training starting from the current values of the embeddings. ###Code # Build the CF model and train it. model = build_model(ratings, embedding_dim=30, init_stddev=0.5) model.train(num_iterations=1000, learning_rate=10.) ###Output _____no_output_____ ###Markdown The movie and user embeddings are also displayed in the right figure. When the embedding dimension is greater than 3, the embeddings are projected on the first 3 dimensions. The next section will have a more detailed look at the embeddings. IV. 
Inspecting the EmbeddingsIn this section, we take a closer look at the learned embeddings, by- computing your recommendations- looking at the nearest neighbors of some movies,- looking at the norms of the movie embeddings,- visualizing the embedding in a projected embedding space. Exercise 5: Write a function that computes the scores of the candidatesWe start by writing a function that, given a query embedding $u \in \mathbb R^d$ and item embeddings $V \in \mathbb R^{N \times d}$, computes the item scores.As discussed in the lecture, there are different similarity measures we can use, and these can yield different results. We will compare the following:- dot product: the score of item j is $\langle u, V_j \rangle$.- cosine: the score of item j is $\frac{\langle u, V_j \rangle}{\|u\|\|V_j\|}$.Hints:- you can use [`np.dot`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html) to compute the product of two np.Arrays.- you can use [`np.linalg.norm`](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.norm.html) to compute the norm of a np.Array. ###Code DOT = 'dot' COSINE = 'cosine' def compute_scores(query_embedding, item_embeddings, measure=DOT): """Computes the scores of the candidates given a query. Args: query_embedding: a vector of shape [k], representing the query embedding. item_embeddings: a matrix of shape [N, k], such that row i is the embedding of item i. measure: a string specifying the similarity measure to be used. Can be either DOT or COSINE. Returns: scores: a vector of shape [N], such that scores[i] is the score of item i. """ # ========================= Complete this section ============================ # scores = # ============================================================================ return scores #@title Solution DOT = 'dot' COSINE = 'cosine' def compute_scores(query_embedding, item_embeddings, measure=DOT): """Computes the scores of the candidates given a query. 
Args: query_embedding: a vector of shape [k], representing the query embedding. item_embeddings: a matrix of shape [N, k], such that row i is the embedding of item i. measure: a string specifying the similarity measure to be used. Can be either DOT or COSINE. Returns: scores: a vector of shape [N], such that scores[i] is the score of item i. """ u = query_embedding V = item_embeddings if measure == COSINE: V = V / np.linalg.norm(V, axis=1, keepdims=True) u = u / np.linalg.norm(u) scores = u.dot(V.T) return scores ###Output _____no_output_____ ###Markdown Equipped with this function, we can compute recommendations, where the query embedding can be either a user embedding or a movie embedding. ###Code # @title User recommendations and nearest neighbors (run this cell) def user_recommendations(model, measure=DOT, exclude_rated=False, k=6): if USER_RATINGS: scores = compute_scores( model.embeddings["user_id"][943], model.embeddings["movie_id"], measure) score_key = measure + ' score' df = pd.DataFrame({ score_key: list(scores), 'movie_id': movies['movie_id'], 'titles': movies['title'], 'genres': movies['all_genres'], }) if exclude_rated: # remove movies that are already rated rated_movies = ratings[ratings.user_id == "943"]["movie_id"].values df = df[df.movie_id.apply(lambda movie_id: movie_id not in rated_movies)] display.display(df.sort_values([score_key], ascending=False).head(k)) def movie_neighbors(model, title_substring, measure=DOT, k=6): # Search for movie ids that match the given substring. ids = movies[movies['title'].str.contains(title_substring)].index.values titles = movies.iloc[ids]['title'].values if len(titles) == 0: raise ValueError("Found no movies with title %s" % title_substring) print("Nearest neighbors of : %s." % titles[0]) if len(titles) > 1: print("[Found more than one matching movie. 
Other candidates: {}]".format(
        ", ".join(titles[1:])))
  movie_id = ids[0]
  scores = compute_scores(
      model.embeddings["movie_id"][movie_id], model.embeddings["movie_id"],
      measure)
  score_key = measure + ' score'
  df = pd.DataFrame({
      score_key: list(scores),
      'titles': movies['title'],
      'genres': movies['all_genres']
  })
  display.display(df.sort_values([score_key], ascending=False).head(k))

###Output
_____no_output_____
###Markdown
Your recommendationsIf you chose to input your recommendations, you can run the next cell to generate recommendations for you.
###Code
user_recommendations(model, measure=COSINE, k=5)
###Output
_____no_output_____
###Markdown
How do the recommendations look? Movie Nearest neighborsLet's look at the nearest neighbors for some of the movies.
###Code
movie_neighbors(model, "Aladdin", DOT)
movie_neighbors(model, "Aladdin", COSINE)
###Output
_____no_output_____
###Markdown
It seems that the quality of learned embeddings may not be very good. This will be addressed in Section V by adding several regularization techniques. First, we will further inspect the embeddings. Movie Embedding NormWe can also observe that the recommendations with dot-product and cosine are different: with dot-product, the model tends to recommend popular movies. This can be explained by the fact that in matrix factorization models, the norm of the embedding is often correlated with popularity (popular movies have a larger norm), which makes it more likely to recommend more popular items. We can confirm this hypothesis by sorting the movies by their embedding norm, as done in the next cell.
###Code
# @title Embedding Visualization code (run this cell)
def movie_embedding_norm(models):
  """Visualizes the norm and number of ratings of the movie embeddings.

  Args:
    model: A MFModel object.
""" if not isinstance(models, list): models = [models] df = pd.DataFrame({ 'title': movies['title'], 'genre': movies['genre'], 'num_ratings': movies_ratings['rating count'], }) charts = [] brush = alt.selection_interval() for i, model in enumerate(models): norm_key = 'norm'+str(i) df[norm_key] = np.linalg.norm(model.embeddings["movie_id"], axis=1) nearest = alt.selection( type='single', encodings=['x', 'y'], on='mouseover', nearest=True, empty='none') base = alt.Chart().mark_circle().encode( x='num_ratings', y=norm_key, color=alt.condition(brush, alt.value('#4c78a8'), alt.value('lightgray')) ).properties( selection=nearest).add_selection(brush) text = alt.Chart().mark_text(align='center', dx=5, dy=-5).encode( x='num_ratings', y=norm_key, text=alt.condition(nearest, 'title', alt.value(''))) charts.append(alt.layer(base, text)) return alt.hconcat(*charts, data=df) def visualize_movie_embeddings(data, x, y): nearest = alt.selection( type='single', encodings=['x', 'y'], on='mouseover', nearest=True, empty='none') base = alt.Chart().mark_circle().encode( x=x, y=y, color=alt.condition(genre_filter, "genre", alt.value("whitesmoke")), ).properties( width=600, height=600, selection=nearest) text = alt.Chart().mark_text(align='left', dx=5, dy=-5).encode( x=x, y=y, text=alt.condition(nearest, 'title', alt.value(''))) return alt.hconcat(alt.layer(base, text), genre_chart, data=data) def tsne_movie_embeddings(model): """Visualizes the movie embeddings, projected using t-SNE with Cosine measure. Args: model: A MFModel object. 
  """
  tsne = sklearn.manifold.TSNE(
      n_components=2, perplexity=40, metric='cosine', early_exaggeration=10.0,
      init='pca', verbose=True, n_iter=400)
  print('Running t-SNE...')
  V_proj = tsne.fit_transform(model.embeddings["movie_id"])
  movies.loc[:,'x'] = V_proj[:, 0]
  movies.loc[:,'y'] = V_proj[:, 1]
  return visualize_movie_embeddings(movies, 'x', 'y')

movie_embedding_norm(model)

###Output
_____no_output_____
###Markdown
Note: Depending on how the model is initialized, you may observe that some niche movies (ones with few ratings) have a high norm, leading to spurious recommendations. This can happen if the embedding of that movie happens to be initialized with a high norm. Then, because the movie has few ratings, it is infrequently updated, and can keep its high norm. This will be alleviated by using regularization.Try changing the value of the hyper-parameter `init_stddev`. One quantity that can be helpful is that the expected norm of a $d$-dimensional vector with entries $\sim \mathcal N(0, \sigma^2)$ is approximately $\sigma \sqrt d$.How does this affect the embedding norm distribution, and the ranking of the top-norm movies?
###Code
#@title Solution
model_lowinit = build_model(ratings, embedding_dim=30, init_stddev=0.05)
model_lowinit.train(num_iterations=1000, learning_rate=10.)
movie_neighbors(model_lowinit, "Aladdin", DOT)
movie_neighbors(model_lowinit, "Aladdin", COSINE)
movie_embedding_norm([model, model_lowinit])

###Output
_____no_output_____
###Markdown
Embedding visualizationSince it is hard to visualize embeddings in a higher-dimensional space (when the embedding dimension $k > 3$), one approach is to project the embeddings to a lower dimensional space. T-SNE (T-distributed Stochastic Neighbor Embedding) is an algorithm that projects the embeddings while attempting to preserve their pairwise distances. It can be useful for visualization, but one should use it with care.
For more information on using t-SNE, see [How to Use t-SNE Effectively](https://distill.pub/2016/misread-tsne/). ###Code tsne_movie_embeddings(model_lowinit) ###Output _____no_output_____ ###Markdown You can highlight the embeddings of a given genre by clicking on the genres panel (SHIFT+click to select multiple genres).We can observe that the embeddings do not seem to have any notable structure, and the embeddings of a given genre are located all over the embedding space. This confirms the poor quality of the learned embeddings. One of the main reasons, which we will address in the next section, is that we only trained the model on observed pairs, and without regularization. V. Regularization In Matrix FactorizationIn the previous section, our loss was defined as the mean squared error on the observed part of the rating matrix. As discussed in the lecture, this can be problematic as the model does not learn how to place the embeddings of irrelevant movies. This phenomenon is known as *folding*.We will add regularization terms that will address this issue. We will use two types of regularization:- Regularization of the model parameters. This is a common $\ell_2$ regularization term on the embedding matrices, given by $r(U, V) = \frac{1}{N} \sum_i \|U_i\|^2 + \frac{1}{M}\sum_j \|V_j\|^2$.- A global prior that pushes the prediction of any pair towards zero, called the *gravity* term. This is given by $g(U, V) = \frac{1}{MN} \sum_{i = 1}^N \sum_{j = 1}^M \langle U_i, V_j \rangle^2$.The total loss is then given by$$\frac{1}{|\Omega|}\sum_{(i, j) \in \Omega} (A_{ij} - \langle U_i, V_j\rangle)^2 + \lambda _r r(U, V) + \lambda_g g(U, V)$$where $\lambda_r$ and $\lambda_g$ are two regularization coefficients (hyper-parameters). Exercise 6: Build a regularized Matrix Factorization model and train itWrite a function that builds a regularized model. You are given a function `gravity(U, V)` that computes the gravity term given the two embedding matrices $U$ and $V$. 
###Code
def gravity(U, V):
  """Creates a gravity loss given two embedding matrices.

  Computes the mean of <U_i, V_j>^2 over all (i, j) pairs. Rather than
  forming the full N x M prediction matrix, this uses the identity
  sum_{ij} <U_i, V_j>^2 = sum((U^T U) * (V^T V)) on the k x k Gram matrices.
  """
  return 1. / (U.shape[0].value*V.shape[0].value) * tf.reduce_sum(
      tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True))

def build_regularized_model(
    ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1.,
    init_stddev=0.1):
  """Builds a regularized matrix-factorization model (exercise stub).

  Args:
    ratings: the DataFrame of movie ratings.
    embedding_dim: The dimension of the embedding space.
    regularization_coeff: The regularization coefficient lambda.
    gravity_coeff: The gravity regularization coefficient lambda_g.
    init_stddev: float, standard deviation of the random initial embeddings.
  Returns:
    A CFModel object that uses a regularized loss.
  """
  # Split the ratings DataFrame into train and test.
  train_ratings, test_ratings = split_dataframe(ratings)
  # SparseTensor representation of the train and test datasets.
  A_train = build_rating_sparse_tensor(train_ratings)
  A_test = build_rating_sparse_tensor(test_ratings)
  # User (U) and movie (V) embeddings, initialized with a normal distribution.
  U = tf.Variable(tf.random_normal(
      [A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
  V = tf.Variable(tf.random_normal(
      [A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
  # ========================= Complete this section ============================
  # error_train =
  # error_test =
  # gravity_loss =
  # regularization_loss =
  # ============================================================================
  total_loss = error_train + regularization_loss + gravity_loss
  losses = {
    'train_error': error_train,
    'test_error': error_test,
  }
  loss_components = {
    'observed_loss': error_train,
    'regularization_loss': regularization_loss,
    'gravity_loss': gravity_loss,
  }
  embeddings = {"user_id": U, "movie_id": V}
  return CFModel(embeddings, total_loss, [losses, loss_components])

# @title Solution
def gravity(U, V):
  """Creates a gravity loss given two embedding matrices.

  The gravity term is the mean squared predicted score over *all* user-movie
  pairs; it pushes every prediction towards zero, which combats folding.
  """
  return 1. / (U.shape[0].value*V.shape[0].value) * tf.reduce_sum(
      tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True))

def build_regularized_model(
    ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1.,
    init_stddev=0.1):
  """Builds a CFModel whose loss is MSE + L2 regularization + gravity.

  Args:
    ratings: the DataFrame of movie ratings.
    embedding_dim: The dimension of the embedding space.
    regularization_coeff: The regularization coefficient lambda.
    gravity_coeff: The gravity regularization coefficient lambda_g.
    init_stddev: float, standard deviation of the random initial embeddings.
  Returns:
    A CFModel object that uses a regularized loss.
  """
  # Split the ratings DataFrame into train and test.
  train_ratings, test_ratings = split_dataframe(ratings)
  # SparseTensor representation of the train and test datasets.
  A_train = build_rating_sparse_tensor(train_ratings)
  A_test = build_rating_sparse_tensor(test_ratings)
  U = tf.Variable(tf.random_normal(
      [A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
  V = tf.Variable(tf.random_normal(
      [A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
  # Mean squared error on the *observed* ratings only.
  error_train = sparse_mean_square_error(A_train, U, V)
  error_test = sparse_mean_square_error(A_test, U, V)
  # Gravity term: lambda_g * mean(<U_i, V_j>^2) over all pairs.
  gravity_loss = gravity_coeff * gravity(U, V)
  # L2 term: lambda_r * (mean ||U_i||^2 + mean ||V_j||^2).
  regularization_loss = regularization_coeff * (
      tf.reduce_sum(U*U)/U.shape[0].value + tf.reduce_sum(V*V)/V.shape[0].value)
  total_loss = error_train + regularization_loss + gravity_loss
  losses = {
    'train_error_observed': error_train,
    'test_error_observed': error_test,
  }
  loss_components = {
    'observed_loss': error_train,
    'regularization_loss': regularization_loss,
    'gravity_loss': gravity_loss,
  }
  embeddings = {"user_id": U, "movie_id": V}
  return CFModel(embeddings, total_loss, [losses, loss_components])

###Output
_____no_output_____
###Markdown
It is now time to train the regularized model! You can try different values of the regularization coefficients, and different embedding dimensions.
###Code reg_model = build_regularized_model( ratings, regularization_coeff=0.1, gravity_coeff=1.0, embedding_dim=35, init_stddev=.05) reg_model.train(num_iterations=2000, learning_rate=20.) ###Output _____no_output_____ ###Markdown Observe that adding the regularization terms results in a higher MSE, both on the training and test set. However, as we will see, the quality of the recommendations improves. This highlights a tension between fitting the observed data and minimizing the regularization terms. Fitting the observed data often emphasizes learning high similarity (between items with many interactions), but a good embedding representation also requires learning low similarity (between items with few or no interactions). Inspect the resultsLet's see if the results with regularization look better. ###Code user_recommendations(reg_model, DOT, exclude_rated=True, k=10) ###Output _____no_output_____ ###Markdown Hopefully, these recommendations look better. You can change the similarity measure from COSINE to DOT and observe how this affects the recommendations.Since the model is likely to recommend items that you rated highly, you have the option to exclude the items you rated, using `exclude_rated=True`.In the following cells, we display the nearest neighbors, the embedding norms, and the t-SNE projection of the movie embeddings. ###Code movie_neighbors(reg_model, "Aladdin", DOT) movie_neighbors(reg_model, "Aladdin", COSINE) ###Output _____no_output_____ ###Markdown Here we compare the embedding norms for `model` and `reg_model`. Selecting a subset of the embeddings will highlight them on both charts simultaneously. ###Code movie_embedding_norm([model, model_lowinit, reg_model]) # Visualize the embeddings tsne_movie_embeddings(reg_model) ###Output _____no_output_____ ###Markdown We should observe that the embeddings have a lot more structure than the unregularized case. 
Try selecting different genres and observe how they tend to form clusters (for example Horror, Animation and Children). ConclusionThis concludes this section on matrix factorization models. Note that while the scale of the problem is small enough to allow efficient training using SGD, many practical problems need to be trained using more specialized algorithms such as Alternating Least Squares (see [tf.contrib.factorization.WALSMatrixFactorization](https://www.tensorflow.org/api_docs/python/tf/contrib/factorization/WALSMatrixFactorization) for a TF implementation). VI. Softmax modelIn this section, we will train a simple softmax model that predicts whether a given user has rated a movie.**Note**: if you are taking the self-study version of the class, make sure to read through the part of the class covering Softmax training before working on this part.The model will take as input a feature vector $x$ representing the list of movies the user has rated. We start from the ratings DataFrame, which we group by user_id. ###Code rated_movies = (ratings[["user_id", "movie_id"]] .groupby("user_id", as_index=False) .aggregate(lambda x: list(x))) rated_movies.head() ###Output _____no_output_____ ###Markdown We then create a function that generates an example batch, such that each example contains the following features:- movie_id: A tensor of strings of the movie ids that the user rated.- genre: A tensor of strings of the genres of those movies- year: A tensor of strings of the release year. ###Code #@title Batch generation code (run this cell) years_dict = { movie: year for movie, year in zip(movies["movie_id"], movies["year"]) } genres_dict = { movie: genres.split('-') for movie, genres in zip(movies["movie_id"], movies["all_genres"]) } def make_batch(ratings, batch_size): """Creates a batch of examples. Args: ratings: A DataFrame of ratings such that examples["movie_id"] is a list of movies rated by a user. batch_size: The batch size. 
  """
  # Pads the ragged per-user lists into a dense rectangular array, filling
  # missing entries with `fill`.
  def pad(x, fill):
    return pd.DataFrame.from_dict(x).fillna(fill).values
  movie = []
  year = []
  genre = []
  label = []
  for movie_ids in ratings["movie_id"].values:
    movie.append(movie_ids)
    # Flatten the genres of all rated movies into one bag-of-words list.
    genre.append([x for movie_id in movie_ids for x in genres_dict[movie_id]])
    year.append([years_dict[movie_id] for movie_id in movie_ids])
    label.append([int(movie_id) for movie_id in movie_ids])
  features = {
      "movie_id": pad(movie, ""),
      "year": pad(year, ""),
      "genre": pad(genre, ""),
      "label": pad(label, -1)
  }
  batch = (
      tf.data.Dataset.from_tensor_slices(features)
      .shuffle(1000)
      .repeat()
      .batch(batch_size)
      .make_one_shot_iterator()
      .get_next())
  return batch

def select_random(x):
  """Selects a random element from each row of x.

  Rows are padded on the right with -1 (see make_batch), so only the leading
  non-negative entries of each row are candidates for selection.
  """
  def to_float(x):
    return tf.cast(x, tf.float32)
  def to_int(x):
    return tf.cast(x, tf.int64)
  batch_size = tf.shape(x)[0]
  rn = tf.range(batch_size)
  # Number of valid (non-padding) entries in each row.
  nnz = to_float(tf.count_nonzero(x >= 0, axis=1))
  rnd = tf.random_uniform([batch_size])
  # (row, column) index pairs: one uniformly random valid column per row.
  ids = tf.stack([to_int(rn), to_int(nnz * rnd)], axis=1)
  return to_int(tf.gather_nd(x, ids))

###Output
_____no_output_____
###Markdown
Loss functionRecall that the softmax model maps the input features $x$ to a user embedding $\psi(x) \in \mathbb R^d$, where $d$ is the embedding dimension. This vector is then multiplied by a movie embedding matrix $V \in \mathbb R^{m \times d}$ (where $m$ is the number of movies), and the final output of the model is the softmax of the product$$\hat p(x) = \text{softmax}(\psi(x) V^\top).$$Given a target label $y$, if we denote by $p = 1_y$ a one-hot encoding of this target label, then the loss is the cross-entropy between $\hat p(x)$ and $p$.
Exercise 7: Write a loss function for the softmax model.In this exercise, we will write a function that takes tensors representing the user embeddings $\psi(x)$, movie embeddings $V$, target label $y$, and return the cross-entropy loss.Hint: You can use the function [`tf.nn.sparse_softmax_cross_entropy_with_logits`](https://www.tensorflow.org/api_docs/python/tf/nn/sparse_softmax_cross_entropy_with_logits), which takes `logits` as input, where `logits` refers to the product $\psi(x) V^\top$. ###Code def softmax_loss(user_embeddings, movie_embeddings, labels): """Returns the cross-entropy loss of the softmax model. Args: user_embeddings: A tensor of shape [batch_size, embedding_dim]. movie_embeddings: A tensor of shape [num_movies, embedding_dim]. labels: A sparse tensor of dense_shape [batch_size, 1], such that labels[i] is the target label for example i. Returns: The mean cross-entropy loss. """ # ========================= Complete this section ============================ # logits = # loss = # ============================================================================ return loss # @title Solution def softmax_loss(user_embeddings, movie_embeddings, labels): """Returns the cross-entropy loss of the softmax model. Args: user_embeddings: A tensor of shape [batch_size, embedding_dim]. movie_embeddings: A tensor of shape [num_movies, embedding_dim]. labels: A tensor of [batch_size], such that labels[i] is the target label for example i. Returns: The mean cross-entropy loss. 
""" # Verify that the embddings have compatible dimensions user_emb_dim = user_embeddings.shape[1].value movie_emb_dim = movie_embeddings.shape[1].value if user_emb_dim != movie_emb_dim: raise ValueError( "The user embedding dimension %d should match the movie embedding " "dimension % d" % (user_emb_dim, movie_emb_dim)) logits = tf.matmul(user_embeddings, movie_embeddings, transpose_b=True) loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=labels)) return loss ###Output _____no_output_____ ###Markdown Exercise 8: Build a softmax model, train it, and inspect its embeddings.We are now ready to build a softmax CFModel. Complete the `build_softmax_model` function in the next cell. The architecture of the model is defined in the function `create_user_embeddings` and illustrated in the figure below. The input embeddings (movie_id, genre and year) are concatenated to form the input layer, then we have hidden layers with dimensions specified by the `hidden_dims` argument. Finally, the last hidden layer is multiplied by the movie embeddings to obtain the logits layer. For the target label, we will use a randomly-sampled movie_id from the list of movies the user rated.![Softmax model](https://github.com/google/eng-edu/blob/master/ml/recommendation-systems/images/softmax-model.png?raw=true)Complete the function below by creating the feature columns and embedding columns, then creating the loss tensors both for the train and test sets (using the `softmax_loss` function of the previous exercise). ###Code def build_softmax_model(rated_movies, embedding_cols, hidden_dims): """Builds a Softmax model for MovieLens. Args: rated_movies: DataFrame of traing examples. embedding_cols: A dictionary mapping feature names (string) to embedding column objects. This will be used in tf.feature_column.input_layer() to create the input layer. hidden_dims: int list of the dimensions of the hidden layers. Returns: A CFModel object. 
""" def create_network(features): """Maps input features dictionary to user embeddings. Args: features: A dictionary of input string tensors. Returns: outputs: A tensor of shape [batch_size, embedding_dim]. """ # Create a bag-of-words embedding for each sparse feature. inputs = tf.feature_column.input_layer(features, embedding_cols) # Hidden layers. input_dim = inputs.shape[1].value for i, output_dim in enumerate(hidden_dims): w = tf.get_variable( "hidden%d_w_" % i, shape=[input_dim, output_dim], initializer=tf.truncated_normal_initializer( stddev=1./np.sqrt(output_dim))) / 10. outputs = tf.matmul(inputs, w) input_dim = output_dim inputs = outputs return outputs train_rated_movies, test_rated_movies = split_dataframe(rated_movies) train_batch = make_batch(train_rated_movies, 200) test_batch = make_batch(test_rated_movies, 100) with tf.variable_scope("model", reuse=False): # Train train_user_embeddings = create_network(train_batch) train_labels = select_random(train_batch["label"]) with tf.variable_scope("model", reuse=True): # Test test_user_embeddings = create_network(test_batch) test_labels = select_random(test_batch["label"]) movie_embeddings = tf.get_variable( "input_layer/movie_id_embedding/embedding_weights") # ========================= Complete this section ============================ # train_loss = # test_loss = # test_precision_at_10 = # ============================================================================ metrics = ( {"train_loss": train_loss, "test_loss": test_loss}, {"test_precision_at_10": test_precision_at_10} ) embeddings = {"movie_id": movie_embeddings} return CFModel(embeddings, train_loss, metrics) # @title Solution def build_softmax_model(rated_movies, embedding_cols, hidden_dims): """Builds a Softmax model for MovieLens. Args: rated_movies: DataFrame of traing examples. embedding_cols: A dictionary mapping feature names (string) to embedding column objects. This will be used in tf.feature_column.input_layer() to create the input layer. 
hidden_dims: int list of the dimensions of the hidden layers. Returns: A CFModel object. """ def create_network(features): """Maps input features dictionary to user embeddings. Args: features: A dictionary of input string tensors. Returns: outputs: A tensor of shape [batch_size, embedding_dim]. """ # Create a bag-of-words embedding for each sparse feature. inputs = tf.feature_column.input_layer(features, embedding_cols) # Hidden layers. input_dim = inputs.shape[1].value for i, output_dim in enumerate(hidden_dims): w = tf.get_variable( "hidden%d_w_" % i, shape=[input_dim, output_dim], initializer=tf.truncated_normal_initializer( stddev=1./np.sqrt(output_dim))) / 10. outputs = tf.matmul(inputs, w) input_dim = output_dim inputs = outputs return outputs train_rated_movies, test_rated_movies = split_dataframe(rated_movies) train_batch = make_batch(train_rated_movies, 200) test_batch = make_batch(test_rated_movies, 100) with tf.variable_scope("model", reuse=False): # Train train_user_embeddings = create_network(train_batch) train_labels = select_random(train_batch["label"]) with tf.variable_scope("model", reuse=True): # Test test_user_embeddings = create_network(test_batch) test_labels = select_random(test_batch["label"]) movie_embeddings = tf.get_variable( "input_layer/movie_id_embedding/embedding_weights") test_loss = softmax_loss( test_user_embeddings, movie_embeddings, test_labels) train_loss = softmax_loss( train_user_embeddings, movie_embeddings, train_labels) _, test_precision_at_10 = tf.metrics.precision_at_k( labels=test_labels, predictions=tf.matmul(test_user_embeddings, movie_embeddings, transpose_b=True), k=10) metrics = ( {"train_loss": train_loss, "test_loss": test_loss}, {"test_precision_at_10": test_precision_at_10} ) embeddings = {"movie_id": movie_embeddings} return CFModel(embeddings, train_loss, metrics) ###Output _____no_output_____ ###Markdown Train the Softmax modelWe are now ready to train the softmax model. 
You can set the following hyperparameters:- learning rate- number of iterations. Note: you can run `softmax_model.train()` again to continue training the model from its current state.- input embedding dimensions (the `input_dims` argument)- number of hidden layers and size of each layer (the `hidden_dims` argument)Note: since our input features are string-valued (movie_id, genre, and year), we need to map them to integer ids. This is done using [`tf.feature_column.categorical_column_with_vocabulary_list`](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_list), which takes a vocabulary list specifying all the values the feature can take. Then each id is mapped to an embedding vector using [`tf.feature_column.embedding_column`](https://www.tensorflow.org/api_docs/python/tf/feature_column/embedding_column). ###Code # Create feature embedding columns def make_embedding_col(key, embedding_dim): categorical_col = tf.feature_column.categorical_column_with_vocabulary_list( key=key, vocabulary_list=list(set(movies[key].values)), num_oov_buckets=0) return tf.feature_column.embedding_column( categorical_column=categorical_col, dimension=embedding_dim, # default initializer: trancated normal with stddev=1/sqrt(dimension) combiner='mean') with tf.Graph().as_default(): softmax_model = build_softmax_model( rated_movies, embedding_cols=[ make_embedding_col("movie_id", 35), make_embedding_col("genre", 3), make_embedding_col("year", 2), ], hidden_dims=[35]) softmax_model.train( learning_rate=8., num_iterations=3000, optimizer=tf.train.AdagradOptimizer) ###Output _____no_output_____ ###Markdown Inspect the embeddingsWe can inspect the movie embeddings as we did for the previous models. Note that in this case, the movie embeddings are used at the same time as input embeddings (for the bag of words representation of the user history), and as softmax weights. 
###Code movie_neighbors(softmax_model, "Aladdin", DOT) movie_neighbors(softmax_model, "Aladdin", COSINE) movie_embedding_norm([reg_model, softmax_model]) tsne_movie_embeddings(softmax_model) ###Output _____no_output_____ ###Markdown Copyright 2018 Google LLC. ###Code # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###Output _____no_output_____ ###Markdown Recommendation Systems with TensorFlowThis Colab notebook complements the course on [Recommendation Systems](https://developers.google.com/machine-learning/recommendation/). Specifically, we'll be using matrix factorization to learn user and movie embeddings. IntroductionWe will create a movie recommendation system based on the [MovieLens](https://movielens.org/) dataset available [here](http://grouplens.org/datasets/movielens/). The data consists of movies ratings (on a scale of 1 to 5). Outline 1. Exploring the MovieLens Data (10 minutes) 1. Preliminaries (25 minutes) 1. Training a matrix factorization model (15 minutes) 1. Inspecting the Embeddings (15 minutes) 1. Regularization in matrix factorization (15 minutes) 1. Softmax model training (30 minutes) SetupLet's get started by importing the required packages. 
###Code # @title Imports (run this cell) from __future__ import print_function import numpy as np import pandas as pd import collections from mpl_toolkits.mplot3d import Axes3D from IPython import display from matplotlib import pyplot as plt import sklearn import sklearn.manifold import tensorflow as tf tf.logging.set_verbosity(tf.logging.ERROR) # Add some convenience functions to Pandas DataFrame. pd.options.display.max_rows = 10 pd.options.display.float_format = '{:.3f}'.format def mask(df, key, function): """Returns a filtered dataframe, by applying function to key""" return df[function(df[key])] def flatten_cols(df): df.columns = [' '.join(col).strip() for col in df.columns.values] return df pd.DataFrame.mask = mask pd.DataFrame.flatten_cols = flatten_cols # Install Altair and activate its colab renderer. print("Installing Altair...") !pip install git+git://github.com/altair-viz/altair.git import altair as alt alt.data_transformers.enable('default', max_rows=None) alt.renderers.enable('colab') print("Done installing Altair.") # Install spreadsheets and import authentication module. USER_RATINGS = False !pip install --upgrade -q gspread from google.colab import auth import gspread from oauth2client.client import GoogleCredentials ###Output _____no_output_____ ###Markdown We then download the MovieLens Data, and create DataFrames containing movies, users, and ratings. ###Code # @title Load the MovieLens data (run this cell). # Download MovieLens data. print("Downloading movielens data...") from urllib.request import urlretrieve import zipfile urlretrieve("http://files.grouplens.org/datasets/movielens/ml-100k.zip", "movielens.zip") zip_ref = zipfile.ZipFile('movielens.zip', "r") zip_ref.extractall() print("Done. Dataset contains:") print(zip_ref.read('ml-100k/u.info')) # Load each data set (users, movies, and ratings). 
users_cols = ['user_id', 'age', 'sex', 'occupation', 'zip_code'] users = pd.read_csv( 'ml-100k/u.user', sep='|', names=users_cols, encoding='latin-1') ratings_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp'] ratings = pd.read_csv( 'ml-100k/u.data', sep='\t', names=ratings_cols, encoding='latin-1') # The movies file contains a binary feature for each genre. genre_cols = [ "genre_unknown", "Action", "Adventure", "Animation", "Children", "Comedy", "Crime", "Documentary", "Drama", "Fantasy", "Film-Noir", "Horror", "Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western" ] movies_cols = [ 'movie_id', 'title', 'release_date', "video_release_date", "imdb_url" ] + genre_cols movies = pd.read_csv( 'ml-100k/u.item', sep='|', names=movies_cols, encoding='latin-1') # Since the ids start at 1, we shift them to start at 0. users["user_id"] = users["user_id"].apply(lambda x: str(x-1)) movies["movie_id"] = movies["movie_id"].apply(lambda x: str(x-1)) movies["year"] = movies['release_date'].apply(lambda x: str(x).split('-')[-1]) ratings["movie_id"] = ratings["movie_id"].apply(lambda x: str(x-1)) ratings["user_id"] = ratings["user_id"].apply(lambda x: str(x-1)) ratings["rating"] = ratings["rating"].apply(lambda x: float(x)) # Compute the number of movies to which a genre is assigned. genre_occurences = movies[genre_cols].sum().to_dict() # Since some movies can belong to more than one genre, we create different # 'genre' columns as follows: # - all_genres: all the active genres of the movie. # - genre: randomly sampled from the active genres. 
def mark_genres(movies, genres): def get_random_genre(gs): active = [genre for genre, g in zip(genres, gs) if g==1] if len(active) == 0: return 'Other' return np.random.choice(active) def get_all_genres(gs): active = [genre for genre, g in zip(genres, gs) if g==1] if len(active) == 0: return 'Other' return '-'.join(active) movies['genre'] = [ get_random_genre(gs) for gs in zip(*[movies[genre] for genre in genres])] movies['all_genres'] = [ get_all_genres(gs) for gs in zip(*[movies[genre] for genre in genres])] mark_genres(movies, genre_cols) # Create one merged DataFrame containing all the movielens data. movielens = ratings.merge(movies, on='movie_id').merge(users, on='user_id') # Utility to split the data into training and test sets. def split_dataframe(df, holdout_fraction=0.1): """Splits a DataFrame into training and test sets. Args: df: a dataframe. holdout_fraction: fraction of dataframe rows to use in the test set. Returns: train: dataframe for training test: dataframe for testing """ test = df.sample(frac=holdout_fraction, replace=False) train = df[~df.index.isin(test.index)] return train, test ###Output _____no_output_____ ###Markdown I. Exploring the Movielens DataBefore we dive into model building, let's inspect our MovieLens dataset. It is usually helpful to understand the statistics of the dataset. UsersWe start by printing some basic statistics describing the numeric user features. ###Code users.describe() ###Output _____no_output_____ ###Markdown We can also print some basic statistics describing the categorical user features ###Code users.describe(include=[np.object]) ###Output _____no_output_____ ###Markdown We can also create histograms to further understand the distribution of the users. We use Altair to create an interactive chart. ###Code # @title Altair visualization code (run this cell) # The following functions are used to generate interactive Altair charts. # We will display histograms of the data, sliced by a given attribute. 
# Create filters to be used to slice the data. occupation_filter = alt.selection_multi(fields=["occupation"]) occupation_chart = alt.Chart().mark_bar().encode( x="count()", y=alt.Y("occupation:N"), color=alt.condition( occupation_filter, alt.Color("occupation:N", scale=alt.Scale(scheme='category20')), alt.value("lightgray")), ).properties(width=300, height=300, selection=occupation_filter) # A function that generates a histogram of filtered data. def filtered_hist(field, label, filter): """Creates a layered chart of histograms. The first layer (light gray) contains the histogram of the full data, and the second contains the histogram of the filtered data. Args: field: the field for which to generate the histogram. label: String label of the histogram. filter: an alt.Selection object to be used to filter the data. """ base = alt.Chart().mark_bar().encode( x=alt.X(field, bin=alt.Bin(maxbins=10), title=label), y="count()", ).properties( width=300, ) return alt.layer( base.transform_filter(filter), base.encode(color=alt.value('lightgray'), opacity=alt.value(.7)), ).resolve_scale(y='independent') ###Output _____no_output_____ ###Markdown Next, we look at the distribution of ratings per user. Clicking on an occupation in the right chart will filter the data by that occupation. The corresponding histogram is shown in blue, and superimposed with the histogram for the whole data (in light gray). You can use SHIFT+click to select multiple subsets.What do you observe, and how might this affect the recommendations? ###Code users_ratings = ( ratings .groupby('user_id', as_index=False) .agg({'rating': ['count', 'mean']}) .flatten_cols() .merge(users, on='user_id') ) # Create a chart for the count, and one for the mean. 
alt.hconcat( filtered_hist('rating count', '# ratings / user', occupation_filter), filtered_hist('rating mean', 'mean user rating', occupation_filter), occupation_chart, data=users_ratings) ###Output _____no_output_____ ###Markdown MoviesIt is also useful to look at information about the movies and their ratings. ###Code movies_ratings = movies.merge( ratings .groupby('movie_id', as_index=False) .agg({'rating': ['count', 'mean']}) .flatten_cols(), on='movie_id') genre_filter = alt.selection_multi(fields=['genre']) genre_chart = alt.Chart().mark_bar().encode( x="count()", y=alt.Y('genre'), color=alt.condition( genre_filter, alt.Color("genre:N"), alt.value('lightgray')) ).properties(height=300, selection=genre_filter) (movies_ratings[['title', 'rating count', 'rating mean']] .sort_values('rating count', ascending=False) .head(10)) (movies_ratings[['title', 'rating count', 'rating mean']] .mask('rating count', lambda x: x > 20) .sort_values('rating mean', ascending=False) .head(10)) ###Output _____no_output_____ ###Markdown Finally, the last chart shows the distribution of the number of ratings and average rating. ###Code # Display the number of ratings and average rating per movie. alt.hconcat( filtered_hist('rating count', '# ratings / movie', genre_filter), filtered_hist('rating mean', 'mean movie rating', genre_filter), genre_chart, data=movies_ratings) ###Output _____no_output_____ ###Markdown II. 
Preliminaries
Our goal is to factorize the ratings matrix $A$ into the product of a user embedding matrix $U$ and movie embedding matrix $V$, such that $A \approx UV^\top$ with
$U = \begin{bmatrix} u_{1} \\ \hline \vdots \\ \hline u_{N} \end{bmatrix}$ and
$V = \begin{bmatrix} v_{1} \\ \hline \vdots \\ \hline v_{M} \end{bmatrix}$.
Here
- $N$ is the number of users,
- $M$ is the number of movies,
- $A_{ij}$ is the rating of the $j$th movie by the $i$th user,
- each row $U_i$ is a $d$-dimensional vector (embedding) representing user $i$,
- each row $V_j$ is a $d$-dimensional vector (embedding) representing movie $j$,
- the prediction of the model for the $(i, j)$ pair is the dot product $\langle U_i, V_j \rangle$.
Sparse Representation of the Rating Matrix
The rating matrix could be very large and, in general, most of the entries are unobserved, since a given user will only rate a small subset of movies. For efficient representation, we will use a [tf.SparseTensor](https://www.tensorflow.org/api_docs/python/tf/SparseTensor). A `SparseTensor` uses three tensors to represent the matrix: `tf.SparseTensor(indices, values, dense_shape)` represents a tensor, where a value $A_{ij} = a$ is encoded by setting `indices[k] = [i, j]` and `values[k] = a`. The last tensor `dense_shape` is used to specify the shape of the full underlying matrix.
Toy example
Assume we have $2$ users and $4$ movies.
Our toy ratings dataframe has three ratings,user\_id | movie\_id | rating--:|--:|--:0 | 0 | 5.00 | 1 | 3.01 | 3 | 1.0The corresponding rating matrix is$$A =\begin{bmatrix}5.0 & 3.0 & 0 & 0 \\0 & 0 & 0 & 1.0\end{bmatrix}$$And the SparseTensor representation is,```pythonSparseTensor( indices=[[0, 0], [0, 1], [1,3]], values=[5.0, 3.0, 1.0], dense_shape=[2, 4])``` Exercise 1: Build a tf.SparseTensor representation of the Rating Matrix.In this exercise, we'll write a function that maps from our `ratings` DataFrame to a `tf.SparseTensor`.Hint: you can select the values of a given column of a Dataframe `df` using `df['column_name'].values`. ###Code def build_rating_sparse_tensor(ratings_df): """ Args: ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns. Returns: A tf.SparseTensor representing the ratings matrix. """ # ========================= Complete this section ============================ # indices = # values = # ============================================================================ return tf.SparseTensor( indices=indices, values=values, dense_shape=[users.shape[0], movies.shape[0]]) #@title Solution def build_rating_sparse_tensor(ratings_df): """ Args: ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns. Returns: a tf.SparseTensor representing the ratings matrix. """ indices = ratings_df[['user_id', 'movie_id']].values values = ratings_df['rating'].values return tf.SparseTensor( indices=indices, values=values, dense_shape=[users.shape[0], movies.shape[0]]) ###Output _____no_output_____ ###Markdown Calculating the errorThe model approximates the ratings matrix $A$ by a low-rank product $UV^\top$. We need a way to measure the approximation error. We'll start by using the Mean Squared Error of observed entries only (we will revisit this later). 
It is defined as$$\begin{align*}\text{MSE}(A, UV^\top)&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - (UV^\top)_{ij})^2} \\&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - \langle U_i, V_j\rangle)^2}\end{align*}$$where $\Omega$ is the set of observed ratings, and $|\Omega|$ is the cardinality of $\Omega$. Exercise 2: Mean Squared ErrorWrite a TensorFlow function that takes a sparse rating matrix $A$ and the two embedding matrices $U, V$ and returns the mean squared error $\text{MSE}(A, UV^\top)$.Hints: * in this section, we only consider observed entries when calculating the loss. * a `SparseTensor` `sp_x` is a tuple of three Tensors: `sp_x.indices`, `sp_x.values` and `sp_x.dense_shape`. * you may find [`tf.gather_nd`](https://www.tensorflow.org/api_docs/python/tf/gather_nd) and [`tf.losses.mean_squared_error`](https://www.tensorflow.org/api_docs/python/tf/losses/mean_squared_error) helpful. ###Code def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings): """ Args: sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M] user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding dimension, such that U_i is the embedding of user i. movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding dimension, such that V_j is the embedding of movie j. Returns: A scalar Tensor representing the MSE between the true ratings and the model's predictions. """ # ========================= Complete this section ============================ # loss = # ============================================================================ return loss #@title Solution def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings): """ Args: sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M] user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding dimension, such that U_i is the embedding of user i. 
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding dimension, such that V_j is the embedding of movie j. Returns: A scalar Tensor representing the MSE between the true ratings and the model's predictions. """ predictions = tf.gather_nd( tf.matmul(user_embeddings, movie_embeddings, transpose_b=True), sparse_ratings.indices) loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions) return loss ###Output _____no_output_____ ###Markdown Note: One approach is to compute the full prediction matrix $UV^\top$, then gather the entries corresponding to the observed pairs. The memory cost of this approach is $O(NM)$. For the MovieLens dataset, this is fine, as the dense $N \times M$ matrix is small enough to fit in memory ($N = 943$, $M = 1682$).Another approach (given in the alternate solution below) is to only gather the embeddings of the observed pairs, then compute their dot products. The memory cost is $O(|\Omega| d)$ where $d$ is the embedding dimension. In our case, $|\Omega| = 10^5$, and the embedding dimension is on the order of $10$, so the memory cost of both methods is comparable. But when the number of users or movies is much larger, the first approach becomes infeasible. ###Code #@title Alternate Solution def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings): """ Args: sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M] user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding dimension, such that U_i is the embedding of user i. movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding dimension, such that V_j is the embedding of movie j. Returns: A scalar Tensor representing the MSE between the true ratings and the model's predictions. 
""" predictions = tf.reduce_sum( tf.gather(user_embeddings, sparse_ratings.indices[:, 0]) * tf.gather(movie_embeddings, sparse_ratings.indices[:, 1]), axis=1) loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions) return loss ###Output _____no_output_____ ###Markdown Exercise 3 (Optional): adding your own ratings to the data set You have the option to add your own ratings to the data set. If you choose to do so, you will be able to see recommendations for yourself.Start by checking the box below. Running the next cell will authenticate you to your google Drive account, and create a spreadsheet, that contains all movie titles in column 'A'. Follow the link to the spreadsheet and take 3 minutes to rate some of the movies. Your ratings should be entered in column 'B'. ###Code USER_RATINGS = True #@param {type:"boolean"} # @title Run to create a spreadsheet, then use it to enter your ratings. # Authenticate user. if USER_RATINGS: auth.authenticate_user() gc = gspread.authorize(GoogleCredentials.get_application_default()) # Create the spreadsheet and print a link to it. try: sh = gc.open('MovieLens-test') except(gspread.SpreadsheetNotFound): sh = gc.create('MovieLens-test') worksheet = sh.sheet1 titles = movies['title'].values cell_list = worksheet.range(1, 1, len(titles), 1) for cell, title in zip(cell_list, titles): cell.value = title worksheet.update_cells(cell_list) print("Link to the spreadsheet: " "https://docs.google.com/spreadsheets/d/{}/edit".format(sh.id)) ###Output _____no_output_____ ###Markdown Run the next cell to load your ratings and add them to the main `ratings` DataFrame. ###Code # @title Run to load your ratings. # Load the ratings from the spreadsheet and create a DataFrame. 
if USER_RATINGS: my_ratings = pd.DataFrame.from_records(worksheet.get_all_values()).reset_index() my_ratings = my_ratings[my_ratings[1] != ''] my_ratings = pd.DataFrame({ 'user_id': "943", 'movie_id': list(map(str, my_ratings['index'])), 'rating': list(map(float, my_ratings[1])), }) # Remove previous ratings. ratings = ratings[ratings.user_id != "943"] # Add new ratings. ratings = ratings.append(my_ratings, ignore_index=True) # Add new user to the users DataFrame. if users.shape[0] == 943: users = users.append(users.iloc[942], ignore_index=True) users["user_id"][943] = "943" print("Added your %d ratings; you have great taste!" % len(my_ratings)) ratings[ratings.user_id=="943"].merge(movies[['movie_id', 'title']]) ###Output _____no_output_____ ###Markdown III. Training a Matrix Factorization model CFModel (Collaborative Filtering Model) helper classThis is a simple class to train a matrix factorization model using stochastic gradient descent.The class constructor takes- the user embeddings U (a `tf.Variable`).- the movie embeddings V, (a `tf.Variable`).- a loss to optimize (a `tf.Tensor`).- an optional list of metrics dictionaries, each mapping a string (the name of the metric) to a tensor. These are evaluated and plotted during training (e.g. training error and test error).After training, one can access the trained embeddings using the `model.embeddings` dictionary.Example usage:```U_var = ...V_var = ...loss = ...model = CFModel(U_var, V_var, loss)model.train(iterations=100, learning_rate=1.0)user_embeddings = model.embeddings['user_id']movie_embeddings = model.embeddings['movie_id']``` ###Code # @title CFModel helper class (run this cell) class CFModel(object): """Simple class that represents a collaborative filtering model""" def __init__(self, embedding_vars, loss, metrics=None): """Initializes a CFModel. Args: embedding_vars: A dictionary of tf.Variables. loss: A float Tensor. The loss to optimize. metrics: optional list of dictionaries of Tensors. 
The metrics in each dictionary will be plotted in a separate figure during training. """ self._embedding_vars = embedding_vars self._loss = loss self._metrics = metrics self._embeddings = {k: None for k in embedding_vars} self._session = None @property def embeddings(self): """The embeddings dictionary.""" return self._embeddings def train(self, num_iterations=100, learning_rate=1.0, plot_results=True, optimizer=tf.train.GradientDescentOptimizer): """Trains the model. Args: iterations: number of iterations to run. learning_rate: optimizer learning rate. plot_results: whether to plot the results at the end of training. optimizer: the optimizer to use. Default to GradientDescentOptimizer. Returns: The metrics dictionary evaluated at the last iteration. """ with self._loss.graph.as_default(): opt = optimizer(learning_rate) train_op = opt.minimize(self._loss) local_init_op = tf.group( tf.variables_initializer(opt.variables()), tf.local_variables_initializer()) if self._session is None: self._session = tf.Session() with self._session.as_default(): self._session.run(tf.global_variables_initializer()) self._session.run(tf.tables_initializer()) tf.train.start_queue_runners() with self._session.as_default(): local_init_op.run() iterations = [] metrics = self._metrics or ({},) metrics_vals = [collections.defaultdict(list) for _ in self._metrics] # Train and append results. for i in range(num_iterations + 1): _, results = self._session.run((train_op, metrics)) if (i % 10 == 0) or i == num_iterations: print("\r iteration %d: " % i + ", ".join( ["%s=%f" % (k, v) for r in results for k, v in r.items()]), end='') iterations.append(i) for metric_val, result in zip(metrics_vals, results): for k, v in result.items(): metric_val[k].append(v) for k, v in self._embedding_vars.items(): self._embeddings[k] = v.eval() if plot_results: # Plot the metrics. 
num_subplots = len(metrics)+1 fig = plt.figure() fig.set_size_inches(num_subplots*10, 8) for i, metric_vals in enumerate(metrics_vals): ax = fig.add_subplot(1, num_subplots, i+1) for k, v in metric_vals.items(): ax.plot(iterations, v, label=k) ax.set_xlim([1, num_iterations]) ax.legend() return results ###Output _____no_output_____ ###Markdown Exercise 4: Build a Matrix Factorization model and train itUsing your `sparse_mean_square_error` function, write a function that builds a `CFModel` by creating the embedding variables and the train and test losses. ###Code def build_model(ratings, embedding_dim=3, init_stddev=1.): """ Args: ratings: a DataFrame of the ratings embedding_dim: the dimension of the embedding vectors. init_stddev: float, the standard deviation of the random initial embeddings. Returns: model: a CFModel. """ # Split the ratings DataFrame into train and test. train_ratings, test_ratings = split_dataframe(ratings) # SparseTensor representation of the train and test datasets. # ========================= Complete this section ============================ # A_train = # A_test = # ============================================================================ # Initialize the embeddings using a normal distribution. U = tf.Variable(tf.random_normal( [A_train.dense_shape[0], embedding_dim], stddev=init_stddev)) V = tf.Variable(tf.random_normal( [A_train.dense_shape[1], embedding_dim], stddev=init_stddev)) # ========================= Complete this section ============================ # train_loss = # test_loss = # ============================================================================ metrics = { 'train_error': train_loss, 'test_error': test_loss } embeddings = { "user_id": U, "movie_id": V } return CFModel(embeddings, train_loss, [metrics]) #@title Solution def build_model(ratings, embedding_dim=3, init_stddev=1.): """ Args: ratings: a DataFrame of the ratings embedding_dim: the dimension of the embedding vectors. 
init_stddev: float, the standard deviation of the random initial embeddings. Returns: model: a CFModel. """ # Split the ratings DataFrame into train and test. train_ratings, test_ratings = split_dataframe(ratings) # SparseTensor representation of the train and test datasets. A_train = build_rating_sparse_tensor(train_ratings) A_test = build_rating_sparse_tensor(test_ratings) # Initialize the embeddings using a normal distribution. U = tf.Variable(tf.random_normal( [A_train.dense_shape[0], embedding_dim], stddev=init_stddev)) V = tf.Variable(tf.random_normal( [A_train.dense_shape[1], embedding_dim], stddev=init_stddev)) train_loss = sparse_mean_square_error(A_train, U, V) test_loss = sparse_mean_square_error(A_test, U, V) metrics = { 'train_error': train_loss, 'test_error': test_loss } embeddings = { "user_id": U, "movie_id": V } return CFModel(embeddings, train_loss, [metrics]) ###Output _____no_output_____ ###Markdown Great, now it's time to train the model!Go ahead and run the next cell, trying different parameters (embedding dimension, learning rate, iterations). The training and test errors are plotted at the end of training. You can inspect these values to validate the hyper-parameters.Note: by calling `model.train` again, the model will continue training starting from the current values of the embeddings. ###Code # Build the CF model and train it. model = build_model(ratings, embedding_dim=30, init_stddev=0.5) model.train(num_iterations=1000, learning_rate=10.) ###Output _____no_output_____ ###Markdown The movie and user embeddings are also displayed in the right figure. When the embedding dimension is greater than 3, the embeddings are projected on the first 3 dimensions. The next section will have a more detailed look at the embeddings. IV. 
Inspecting the EmbeddingsIn this section, we take a closer look at the learned embeddings, by- computing your recommendations- looking at the nearest neighbors of some movies,- looking at the norms of the movie embeddings,- visualizing the embedding in a projected embedding space. Exercise 5: Write a function that computes the scores of the candidatesWe start by writing a function that, given a query embedding $u \in \mathbb R^d$ and item embeddings $V \in \mathbb R^{N \times d}$, computes the item scores.As discussed in the lecture, there are different similarity measures we can use, and these can yield different results. We will compare the following:- dot product: the score of item j is $\langle u, V_j \rangle$.- cosine: the score of item j is $\frac{\langle u, V_j \rangle}{\|u\|\|V_j\|}$.Hints:- you can use [`np.dot`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html) to compute the product of two np.Arrays.- you can use [`np.linalg.norm`](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.norm.html) to compute the norm of a np.Array. ###Code DOT = 'dot' COSINE = 'cosine' def compute_scores(query_embedding, item_embeddings, measure=DOT): """Computes the scores of the candidates given a query. Args: query_embedding: a vector of shape [k], representing the query embedding. item_embeddings: a matrix of shape [N, k], such that row i is the embedding of item i. measure: a string specifying the similarity measure to be used. Can be either DOT or COSINE. Returns: scores: a vector of shape [N], such that scores[i] is the score of item i. """ # ========================= Complete this section ============================ # scores = # ============================================================================ return scores #@title Solution DOT = 'dot' COSINE = 'cosine' def compute_scores(query_embedding, item_embeddings, measure=DOT): """Computes the scores of the candidates given a query. 
Args: query_embedding: a vector of shape [k], representing the query embedding. item_embeddings: a matrix of shape [N, k], such that row i is the embedding of item i. measure: a string specifying the similarity measure to be used. Can be either DOT or COSINE. Returns: scores: a vector of shape [N], such that scores[i] is the score of item i. """ u = query_embedding V = item_embeddings if measure == COSINE: V = V / np.linalg.norm(V, axis=1, keepdims=True) u = u / np.linalg.norm(u) scores = u.dot(V.T) return scores ###Output _____no_output_____ ###Markdown Equipped with this function, we can compute recommendations, where the query embedding can be either a user embedding or a movie embedding. ###Code # @title User recommendations and nearest neighbors (run this cell) def user_recommendations(model, measure=DOT, exclude_rated=False, k=6): if USER_RATINGS: scores = compute_scores( model.embeddings["user_id"][943], model.embeddings["movie_id"], measure) score_key = measure + ' score' df = pd.DataFrame({ score_key: list(scores), 'movie_id': movies['movie_id'], 'titles': movies['title'], 'genres': movies['all_genres'], }) if exclude_rated: # remove movies that are already rated rated_movies = ratings[ratings.user_id == "943"]["movie_id"].values df = df[df.movie_id.apply(lambda movie_id: movie_id not in rated_movies)] display.display(df.sort_values([score_key], ascending=False).head(k)) def movie_neighbors(model, title_substring, measure=DOT, k=6): # Search for movie ids that match the given substring. ids = movies[movies['title'].str.contains(title_substring)].index.values titles = movies.iloc[ids]['title'].values if len(titles) == 0: raise ValueError("Found no movies with title %s" % title_substring) print("Nearest neighbors of : %s." % titles[0]) if len(titles) > 1: print("[Found more than one matching movie. 
Other candidates: {}]".format( ", ".join(titles[1:]))) movie_id = ids[0] scores = compute_scores( model.embeddings["movie_id"][movie_id], model.embeddings["movie_id"], measure) score_key = measure + ' score' df = pd.DataFrame({ score_key: list(scores), 'titles': movies['title'], 'genres': movies['all_genres'] }) display.display(df.sort_values([score_key], ascending=False).head(k)) ###Output _____no_output_____ ###Markdown Your recommendationsIf you chose to input your recommendations, you can run the next cell to generate recommendations for you. ###Code user_recommendations(model, measure=COSINE, k=5) ###Output _____no_output_____ ###Markdown How do the recommendations look? Movie Nearest neighborsLet's look at the neareast neighbors for some of the movies. ###Code movie_neighbors(model, "Aladdin", DOT) movie_neighbors(model, "Aladdin", COSINE) ###Output _____no_output_____ ###Markdown It seems that the quality of learned embeddings may not be very good. This will be addressed in Section V by adding several regularization techniques. First, we will further inspect the embeddings. Movie Embedding NormWe can also observe that the recommendations with dot-product and cosine are different: with dot-product, the model tends to recommend popular movies. This can be explained by the fact that in matrix factorization models, the norm of the embedding is often correlated with popularity (popular movies have a larger norm), which makes it more likely to recommend more popular items. We can confirm this hypothesis by sorting the movies by their embedding norm, as done in the next cell. ###Code # @title Embedding Visualization code (run this cell) def movie_embedding_norm(models): """Visualizes the norm and number of ratings of the movie embeddings. Args: model: A MFModel object. 
""" if not isinstance(models, list): models = [models] df = pd.DataFrame({ 'title': movies['title'], 'genre': movies['genre'], 'num_ratings': movies_ratings['rating count'], }) charts = [] brush = alt.selection_interval() for i, model in enumerate(models): norm_key = 'norm'+str(i) df[norm_key] = np.linalg.norm(model.embeddings["movie_id"], axis=1) nearest = alt.selection( type='single', encodings=['x', 'y'], on='mouseover', nearest=True, empty='none') base = alt.Chart().mark_circle().encode( x='num_ratings', y=norm_key, color=alt.condition(brush, alt.value('#4c78a8'), alt.value('lightgray')) ).properties( selection=nearest).add_selection(brush) text = alt.Chart().mark_text(align='center', dx=5, dy=-5).encode( x='num_ratings', y=norm_key, text=alt.condition(nearest, 'title', alt.value(''))) charts.append(alt.layer(base, text)) return alt.hconcat(*charts, data=df) def visualize_movie_embeddings(data, x, y): nearest = alt.selection( type='single', encodings=['x', 'y'], on='mouseover', nearest=True, empty='none') base = alt.Chart().mark_circle().encode( x=x, y=y, color=alt.condition(genre_filter, "genre", alt.value("whitesmoke")), ).properties( width=600, height=600, selection=nearest) text = alt.Chart().mark_text(align='left', dx=5, dy=-5).encode( x=x, y=y, text=alt.condition(nearest, 'title', alt.value(''))) return alt.hconcat(alt.layer(base, text), genre_chart, data=data) def tsne_movie_embeddings(model): """Visualizes the movie embeddings, projected using t-SNE with Cosine measure. Args: model: A MFModel object. 
""" tsne = sklearn.manifold.TSNE( n_components=2, perplexity=40, metric='cosine', early_exaggeration=10.0, init='pca', verbose=True, n_iter=400) print('Running t-SNE...') V_proj = tsne.fit_transform(model.embeddings["movie_id"]) movies.loc[:,'x'] = V_proj[:, 0] movies.loc[:,'y'] = V_proj[:, 1] return visualize_movie_embeddings(movies, 'x', 'y') movie_embedding_norm(model) ###Output _____no_output_____ ###Markdown Note: Depending on how the model is initialized, you may observe that some niche movies (ones with few ratings) have a high norm, leading to spurious recommendations. This can happen if the embedding of that movie happens to be initialized with a high norm. Then, because the movie has few ratings, it is infrequently updated, and can keep its high norm. This will be alleviated by using regularization.Try changing the value of the hyper-parameter `init_stddev`. One quantity that can be helpful is that the expected norm of a $d$-dimensional vector with entries $\sim \mathcal N(0, \sigma^2)$ is approximatley $\sigma \sqrt d$.How does this affect the embedding norm distribution, and the ranking of the top-norm movies? ###Code #@title Solution model_lowinit = build_model(ratings, embedding_dim=30, init_stddev=0.05) model_lowinit.train(num_iterations=1000, learning_rate=10.) movie_neighbors(model_lowinit, "Aladdin", DOT) movie_neighbors(model_lowinit, "Aladdin", COSINE) movie_embedding_norm([model, model_lowinit]) ###Output _____no_output_____ ###Markdown Embedding visualizationSince it is hard to visualize embeddings in a higher-dimensional space (when the embedding dimension $k > 3$), one approach is to project the embeddings to a lower dimensional space. T-SNE (T-distributed Stochastic Neighbor Embedding) is an algorithm that projects the embeddings while attempting to preserve their pariwise distances. It can be useful for visualization, but one should use it with care. 
For more information on using t-SNE, see [How to Use t-SNE Effectively](https://distill.pub/2016/misread-tsne/). ###Code tsne_movie_embeddings(model_lowinit) ###Output _____no_output_____ ###Markdown You can highlight the embeddings of a given genre by clicking on the genres panel (SHIFT+click to select multiple genres).We can observe that the embeddings do not seem to have any notable structure, and the embeddings of a given genre are located all over the embedding space. This confirms the poor quality of the learned embeddings. One of the main reasons, which we will address in the next section, is that we only trained the model on observed pairs, and without regularization. V. Regularization In Matrix FactorizationIn the previous section, our loss was defined as the mean squared error on the observed part of the rating matrix. As discussed in the lecture, this can be problematic as the model does not learn how to place the embeddings of irrelevant movies. This phenomenon is known as *folding*.We will add regularization terms that will address this issue. We will use two types of regularization:- Regularization of the model parameters. This is a common $\ell_2$ regularization term on the embedding matrices, given by $r(U, V) = \frac{1}{N} \sum_i \|U_i\|^2 + \frac{1}{M}\sum_j \|V_j\|^2$.- A global prior that pushes the prediction of any pair towards zero, called the *gravity* term. This is given by $g(U, V) = \frac{1}{MN} \sum_{i = 1}^N \sum_{j = 1}^M \langle U_i, V_j \rangle^2$.The total loss is then given by$$\frac{1}{|\Omega|}\sum_{(i, j) \in \Omega} (A_{ij} - \langle U_i, V_j\rangle)^2 + \lambda _r r(U, V) + \lambda_g g(U, V)$$where $\lambda_r$ and $\lambda_g$ are two regularization coefficients (hyper-parameters). Exercise 6: Build a regularized Matrix Factorization model and train itWrite a function that builds a regularized model. You are given a function `gravity(U, V)` that computes the gravity term given the two embedding matrices $U$ and $V$. 
###Code def gravity(U, V): """Creates a gravity loss given two embedding matrices.""" return 1. / (U.shape[0].value*V.shape[0].value) * tf.reduce_sum( tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True)) def build_regularized_model( ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1., init_stddev=0.1): """ Args: ratings: the DataFrame of movie ratings. embedding_dim: The dimension of the embedding space. regularization_coeff: The regularization coefficient lambda. gravity_coeff: The gravity regularization coefficient lambda_g. Returns: A CFModel object that uses a regularized loss. """ # Split the ratings DataFrame into train and test. train_ratings, test_ratings = split_dataframe(ratings) # SparseTensor representation of the train and test datasets. A_train = build_rating_sparse_tensor(train_ratings) A_test = build_rating_sparse_tensor(test_ratings) U = tf.Variable(tf.random_normal( [A_train.dense_shape[0], embedding_dim], stddev=init_stddev)) V = tf.Variable(tf.random_normal( [A_train.dense_shape[1], embedding_dim], stddev=init_stddev)) # ========================= Complete this section ============================ # error_train = # error_test = # gravity_loss = # regularization_loss = # ============================================================================ total_loss = error_train + regularization_loss + gravity_loss losses = { 'train_error': error_train, 'test_error': error_test, } loss_components = { 'observed_loss': error_train, 'regularization_loss': regularization_loss, 'gravity_loss': gravity_loss, } embeddings = {"user_id": U, "movie_id": V} return CFModel(embeddings, total_loss, [losses, loss_components]) # @title Solution def gravity(U, V): """Creates a gravity loss given two embedding matrices.""" return 1. 
/ (U.shape[0].value*V.shape[0].value) * tf.reduce_sum( tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True)) def build_regularized_model( ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1., init_stddev=0.1): """ Args: ratings: the DataFrame of movie ratings. embedding_dim: The dimension of the embedding space. regularization_coeff: The regularization coefficient lambda. gravity_coeff: The gravity regularization coefficient lambda_g. Returns: A CFModel object that uses a regularized loss. """ # Split the ratings DataFrame into train and test. train_ratings, test_ratings = split_dataframe(ratings) # SparseTensor representation of the train and test datasets. A_train = build_rating_sparse_tensor(train_ratings) A_test = build_rating_sparse_tensor(test_ratings) U = tf.Variable(tf.random_normal( [A_train.dense_shape[0], embedding_dim], stddev=init_stddev)) V = tf.Variable(tf.random_normal( [A_train.dense_shape[1], embedding_dim], stddev=init_stddev)) error_train = sparse_mean_square_error(A_train, U, V) error_test = sparse_mean_square_error(A_test, U, V) gravity_loss = gravity_coeff * gravity(U, V) regularization_loss = regularization_coeff * ( tf.reduce_sum(U*U)/U.shape[0].value + tf.reduce_sum(V*V)/V.shape[0].value) total_loss = error_train + regularization_loss + gravity_loss losses = { 'train_error_observed': error_train, 'test_error_observed': error_test, } loss_components = { 'observed_loss': error_train, 'regularization_loss': regularization_loss, 'gravity_loss': gravity_loss, } embeddings = {"user_id": U, "movie_id": V} return CFModel(embeddings, total_loss, [losses, loss_components]) ###Output _____no_output_____ ###Markdown It is now time to train the regularized model! You can try different values of the regularization coefficients, and different embedding dimensions. 
###Code reg_model = build_regularized_model( ratings, regularization_coeff=0.1, gravity_coeff=1.0, embedding_dim=35, init_stddev=.05) reg_model.train(num_iterations=2000, learning_rate=20.) ###Output _____no_output_____ ###Markdown Observe that adding the regularization terms results in a higher MSE, both on the training and test set. However, as we will see, the quality of the recommendations improves. This highlights a tension between fitting the observed data and minimizing the regularization terms. Fitting the observed data often emphasizes learning high similarity (between items with many interactions), but a good embedding representation also requires learning low similarity (between items with few or no interactions). Inspect the resultsLet's see if the results with regularization look better. ###Code user_recommendations(reg_model, DOT, exclude_rated=True, k=10) ###Output _____no_output_____ ###Markdown Hopefully, these recommendations look better. You can change the similarity measure from COSINE to DOT and observe how this affects the recommendations.Since the model is likely to recommend items that you rated highly, you have the option to exclude the items you rated, using `exclude_rated=True`.In the following cells, we display the nearest neighbors, the embedding norms, and the t-SNE projection of the movie embeddings. ###Code movie_neighbors(reg_model, "Aladdin", DOT) movie_neighbors(reg_model, "Aladdin", COSINE) ###Output _____no_output_____ ###Markdown Here we compare the embedding norms for `model` and `reg_model`. Selecting a subset of the embeddings will highlight them on both charts simultaneously. ###Code movie_embedding_norm([model, model_lowinit, reg_model]) # Visualize the embeddings tsne_movie_embeddings(reg_model) ###Output _____no_output_____ ###Markdown We should observe that the embeddings have a lot more structure than the unregularized case. 
Try selecting different genres and observe how they tend to form clusters (for example Horror, Animation and Children). ConclusionThis concludes this section on matrix factorization models. Note that while the scale of the problem is small enough to allow efficient training using SGD, many practical problems need to be trained using more specialized algorithms such as Alternating Least Squares (see [tf.contrib.factorization.WALSMatrixFactorization](https://www.tensorflow.org/api_docs/python/tf/contrib/factorization/WALSMatrixFactorization) for a TF implementation). VI. Softmax modelIn this section, we will train a simple softmax model that predicts whether a given user has rated a movie.**Note**: if you are taking the self-study version of the class, make sure to read through the part of the class covering Softmax training before working on this part.The model will take as input a feature vector $x$ representing the list of movies the user has rated. We start from the ratings DataFrame, which we group by user_id. ###Code rated_movies = (ratings[["user_id", "movie_id"]] .groupby("user_id", as_index=False) .aggregate(lambda x: list(x))) rated_movies.head() ###Output _____no_output_____ ###Markdown We then create a function that generates an example batch, such that each example contains the following features:- movie_id: A tensor of strings of the movie ids that the user rated.- genre: A tensor of strings of the genres of those movies- year: A tensor of strings of the release year. ###Code #@title Batch generation code (run this cell) years_dict = { movie: year for movie, year in zip(movies["movie_id"], movies["year"]) } genres_dict = { movie: genres.split('-') for movie, genres in zip(movies["movie_id"], movies["all_genres"]) } def make_batch(ratings, batch_size): """Creates a batch of examples. Args: ratings: A DataFrame of ratings such that examples["movie_id"] is a list of movies rated by a user. batch_size: The batch size. 
""" def pad(x, fill): return pd.DataFrame.from_dict(x).fillna(fill).values movie = [] year = [] genre = [] label = [] for movie_ids in ratings["movie_id"].values: movie.append(movie_ids) genre.append([x for movie_id in movie_ids for x in genres_dict[movie_id]]) year.append([years_dict[movie_id] for movie_id in movie_ids]) label.append([int(movie_id) for movie_id in movie_ids]) features = { "movie_id": pad(movie, ""), "year": pad(year, ""), "genre": pad(genre, ""), "label": pad(label, -1) } batch = ( tf.data.Dataset.from_tensor_slices(features) .shuffle(1000) .repeat() .batch(batch_size) .make_one_shot_iterator() .get_next()) return batch def select_random(x): """Selectes a random elements from each row of x.""" def to_float(x): return tf.cast(x, tf.float32) def to_int(x): return tf.cast(x, tf.int64) batch_size = tf.shape(x)[0] rn = tf.range(batch_size) nnz = to_float(tf.count_nonzero(x >= 0, axis=1)) rnd = tf.random_uniform([batch_size]) ids = tf.stack([to_int(rn), to_int(nnz * rnd)], axis=1) return to_int(tf.gather_nd(x, ids)) ###Output _____no_output_____ ###Markdown Loss functionRecall that the softmax model maps the input features $x$ to a user embedding $\psi(x) \in \mathbb R^d$, where $d$ is the embedding dimension. This vector is then multiplied by a movie embedding matrix $V \in \mathbb R^{m \times d}$ (where $m$ is the number of movies), and the final output of the model is the softmax of the product$$\hat p(x) = \text{softmax}(\psi(x) V^\top).$$Given a target label $y$, if we denote by $p = 1_y$ a one-hot encoding of this target label, then the loss is the cross-entropy between $\hat p(x)$ and $p$. 
Exercise 7: Write a loss function for the softmax model.In this exercise, we will write a function that takes tensors representing the user embeddings $\psi(x)$, movie embeddings $V$, target label $y$, and return the cross-entropy loss.Hint: You can use the function [`tf.nn.sparse_softmax_cross_entropy_with_logits`](https://www.tensorflow.org/api_docs/python/tf/nn/sparse_softmax_cross_entropy_with_logits), which takes `logits` as input, where `logits` refers to the product $\psi(x) V^\top$. ###Code def softmax_loss(user_embeddings, movie_embeddings, labels): """Returns the cross-entropy loss of the softmax model. Args: user_embeddings: A tensor of shape [batch_size, embedding_dim]. movie_embeddings: A tensor of shape [num_movies, embedding_dim]. labels: A sparse tensor of dense_shape [batch_size, 1], such that labels[i] is the target label for example i. Returns: The mean cross-entropy loss. """ # ========================= Complete this section ============================ # logits = # loss = # ============================================================================ return loss # @title Solution def softmax_loss(user_embeddings, movie_embeddings, labels): """Returns the cross-entropy loss of the softmax model. Args: user_embeddings: A tensor of shape [batch_size, embedding_dim]. movie_embeddings: A tensor of shape [num_movies, embedding_dim]. labels: A tensor of [batch_size], such that labels[i] is the target label for example i. Returns: The mean cross-entropy loss. 
""" # Verify that the embddings have compatible dimensions user_emb_dim = user_embeddings.shape[1].value movie_emb_dim = movie_embeddings.shape[1].value if user_emb_dim != movie_emb_dim: raise ValueError( "The user embedding dimension %d should match the movie embedding " "dimension % d" % (user_emb_dim, movie_emb_dim)) logits = tf.matmul(user_embeddings, movie_embeddings, transpose_b=True) loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=labels)) return loss ###Output _____no_output_____ ###Markdown Exercise 8: Build a softmax model, train it, and inspect its embeddings.We are now ready to build a softmax CFModel. Complete the `build_softmax_model` function in the next cell. The architecture of the model is defined in the function `create_user_embeddings` and illustrated in the figure below. The input embeddings (movie_id, genre and year) are concatenated to form the input layer, then we have hidden layers with dimensions specified by the `hidden_dims` argument. Finally, the last hidden layer is multiplied by the movie embeddings to obtain the logits layer. For the target label, we will use a randomly-sampled movie_id from the list of movies the user rated.![Softmax model](https://github.com/google/eng-edu/blob/master/ml/recommendation-systems/images/softmax-model.png?raw=true)Complete the function below by creating the feature columns and embedding columns, then creating the loss tensors both for the train and test sets (using the `softmax_loss` function of the previous exercise). ###Code def build_softmax_model(rated_movies, embedding_cols, hidden_dims): """Builds a Softmax model for MovieLens. Args: rated_movies: DataFrame of traing examples. embedding_cols: A dictionary mapping feature names (string) to embedding column objects. This will be used in tf.feature_column.input_layer() to create the input layer. hidden_dims: int list of the dimensions of the hidden layers. Returns: A CFModel object. 
""" def create_network(features): """Maps input features dictionary to user embeddings. Args: features: A dictionary of input string tensors. Returns: outputs: A tensor of shape [batch_size, embedding_dim]. """ # Create a bag-of-words embedding for each sparse feature. inputs = tf.feature_column.input_layer(features, embedding_cols) # Hidden layers. input_dim = inputs.shape[1].value for i, output_dim in enumerate(hidden_dims): w = tf.get_variable( "hidden%d_w_" % i, shape=[input_dim, output_dim], initializer=tf.truncated_normal_initializer( stddev=1./np.sqrt(output_dim))) / 10. outputs = tf.matmul(inputs, w) input_dim = output_dim inputs = outputs return outputs train_rated_movies, test_rated_movies = split_dataframe(rated_movies) train_batch = make_batch(train_rated_movies, 200) test_batch = make_batch(test_rated_movies, 100) with tf.variable_scope("model", reuse=False): # Train train_user_embeddings = create_network(train_batch) train_labels = select_random(train_batch["label"]) with tf.variable_scope("model", reuse=True): # Test test_user_embeddings = create_network(test_batch) test_labels = select_random(test_batch["label"]) movie_embeddings = tf.get_variable( "input_layer/movie_id_embedding/embedding_weights") # ========================= Complete this section ============================ # train_loss = # test_loss = # test_precision_at_10 = # ============================================================================ metrics = ( {"train_loss": train_loss, "test_loss": test_loss}, {"test_precision_at_10": test_precision_at_10} ) embeddings = {"movie_id": movie_embeddings} return CFModel(embeddings, train_loss, metrics) # @title Solution def build_softmax_model(rated_movies, embedding_cols, hidden_dims): """Builds a Softmax model for MovieLens. Args: rated_movies: DataFrame of traing examples. embedding_cols: A dictionary mapping feature names (string) to embedding column objects. This will be used in tf.feature_column.input_layer() to create the input layer. 
hidden_dims: int list of the dimensions of the hidden layers. Returns: A CFModel object. """ def create_network(features): """Maps input features dictionary to user embeddings. Args: features: A dictionary of input string tensors. Returns: outputs: A tensor of shape [batch_size, embedding_dim]. """ # Create a bag-of-words embedding for each sparse feature. inputs = tf.feature_column.input_layer(features, embedding_cols) # Hidden layers. input_dim = inputs.shape[1].value for i, output_dim in enumerate(hidden_dims): w = tf.get_variable( "hidden%d_w_" % i, shape=[input_dim, output_dim], initializer=tf.truncated_normal_initializer( stddev=1./np.sqrt(output_dim))) / 10. outputs = tf.matmul(inputs, w) input_dim = output_dim inputs = outputs return outputs train_rated_movies, test_rated_movies = split_dataframe(rated_movies) train_batch = make_batch(train_rated_movies, 200) test_batch = make_batch(test_rated_movies, 100) with tf.variable_scope("model", reuse=False): # Train train_user_embeddings = create_network(train_batch) train_labels = select_random(train_batch["label"]) with tf.variable_scope("model", reuse=True): # Test test_user_embeddings = create_network(test_batch) test_labels = select_random(test_batch["label"]) movie_embeddings = tf.get_variable( "input_layer/movie_id_embedding/embedding_weights") test_loss = softmax_loss( test_user_embeddings, movie_embeddings, test_labels) train_loss = softmax_loss( train_user_embeddings, movie_embeddings, train_labels) _, test_precision_at_10 = tf.metrics.precision_at_k( labels=test_labels, predictions=tf.matmul(test_user_embeddings, movie_embeddings, transpose_b=True), k=10) metrics = ( {"train_loss": train_loss, "test_loss": test_loss}, {"test_precision_at_10": test_precision_at_10} ) embeddings = {"movie_id": movie_embeddings} return CFModel(embeddings, train_loss, metrics) ###Output _____no_output_____ ###Markdown Train the Softmax modelWe are now ready to train the softmax model. 
You can set the following hyperparameters:- learning rate- number of iterations. Note: you can run `softmax_model.train()` again to continue training the model from its current state.- input embedding dimensions (the `input_dims` argument)- number of hidden layers and size of each layer (the `hidden_dims` argument)Note: since our input features are string-valued (movie_id, genre, and year), we need to map them to integer ids. This is done using [`tf.feature_column.categorical_column_with_vocabulary_list`](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_list), which takes a vocabulary list specifying all the values the feature can take. Then each id is mapped to an embedding vector using [`tf.feature_column.embedding_column`](https://www.tensorflow.org/api_docs/python/tf/feature_column/embedding_column). ###Code # Create feature embedding columns def make_embedding_col(key, embedding_dim): categorical_col = tf.feature_column.categorical_column_with_vocabulary_list( key=key, vocabulary_list=list(set(movies[key].values)), num_oov_buckets=0) return tf.feature_column.embedding_column( categorical_column=categorical_col, dimension=embedding_dim, # default initializer: trancated normal with stddev=1/sqrt(dimension) combiner='mean') with tf.Graph().as_default(): softmax_model = build_softmax_model( rated_movies, embedding_cols=[ make_embedding_col("movie_id", 35), make_embedding_col("genre", 3), make_embedding_col("year", 2), ], hidden_dims=[35]) softmax_model.train( learning_rate=8., num_iterations=3000, optimizer=tf.train.AdagradOptimizer) ###Output _____no_output_____ ###Markdown Inspect the embeddingsWe can inspect the movie embeddings as we did for the previous models. Note that in this case, the movie embeddings are used at the same time as input embeddings (for the bag of words representation of the user history), and as softmax weights. 
###Code movie_neighbors(softmax_model, "Aladdin", DOT) movie_neighbors(softmax_model, "Aladdin", COSINE) movie_embedding_norm([reg_model, softmax_model]) tsne_movie_embeddings(softmax_model) ###Output _____no_output_____
10_pipeline/airflow/02_Create_Airflow_Environment.ipynb
###Markdown Verify S3_BUCKET Bucket Creation ###Code import boto3 import time session = boto3.session.Session() region = session.region_name account_id = boto3.client("sts").get_caller_identity().get("Account") s3 = boto3.Session().client(service_name="s3", region_name=region) setup_s3_bucket_passed = False %store -r airflow_bucket_name %store -r s3_mwaa_private_path %store -r s3_mwaa_dags_private_path %store -r airflow_env_name %store -r airflow_vpc_name %store -r team_role_arn %store -r airflow_sg_id %store -r airflow_subnet_ids !aws s3 ls $s3_mwaa_private_path from botocore.client import ClientError response = None try: response = s3.head_bucket(Bucket=airflow_bucket_name) print(response) setup_s3_bucket_passed = True except ClientError as e: print("[ERROR] Cannot find bucket {} in {} due to {}.".format(airflow_bucket_name, response, e)) # %store setup_s3_bucket_passed ###Output _____no_output_____ ###Markdown Create Managed Apache Airflow Environment ###Code mwaa = boto3.client("mwaa") s3_mwaa_bucket_arn = "arn:aws:s3:::{}".format(airflow_bucket_name) airflow_env_arn = mwaa.create_environment( DagS3Path="dags", ExecutionRoleArn=team_role_arn, AirflowVersion="1.10.12", WebserverAccessMode="PUBLIC_ONLY", LoggingConfiguration={ "DagProcessingLogs": {"Enabled": True, "LogLevel": "ERROR"}, "SchedulerLogs": {"Enabled": True, "LogLevel": "ERROR"}, "TaskLogs": {"Enabled": True, "LogLevel": "INFO"}, "WebserverLogs": {"Enabled": True, "LogLevel": "ERROR"}, "WorkerLogs": {"Enabled": True, "LogLevel": "ERROR"}, }, MaxWorkers=3, Name=airflow_env_name, NetworkConfiguration={ "SecurityGroupIds": [ airflow_sg_id, ], "SubnetIds": airflow_subnet_ids, }, RequirementsS3ObjectVersion="latest", RequirementsS3Path="requirements.txt", SourceBucketArn=s3_mwaa_bucket_arn, EnvironmentClass="mw1.small", ) %store airflow_env_arn ###Output _____no_output_____ ###Markdown Please be patient this can take around 15 Minutes. 
###Code def get_airflow_check(): response = mwaa.get_environment(Name=airflow_env_name) mwaa_status = response["Environment"]["Status"] return mwaa_status mwaa_status = "CREATING" print("Checking to see if MWAA Env: {} is ready.".format(airflow_env_name)) while get_airflow_check() != "AVAILABLE": mwaa_status time.sleep(60) print("Still waiting for MWAA Environment...") print("Sucess! MWAA Env: {} is ready!".format(airflow_env_name)) ###Output _____no_output_____ ###Markdown PLEASE MAKE SURE THAT THE ABOVE COMMAND RAN SUCESSFULLY BEFORE CONTINUING ###Code response = mwaa.create_web_login_token( Name=airflow_env_name ) webServerHostName = response["WebServerHostname"] webToken = response["WebToken"] airflowUIUrl = 'https://'+webServerHostName+'/aws_mwaa/aws-console-sso?login=true#'+webToken print("Here is your AirflowUI Url:"\n) airflowUIUrl ###Output _____no_output_____ ###Markdown Release Resources ###Code %%html <p><b>Shutting down your kernel for this notebook to release resources.</b></p> <button class="sm-command-button" data-commandlinker-command="kernelmenu:shutdown" style="display:none;">Shutdown Kernel</button> <script> try { els = document.getElementsByClassName("sm-command-button"); els[0].click(); } catch(err) { // NoOp } </script> %%javascript try { Jupyter.notebook.save_checkpoint(); Jupyter.notebook.session.delete(); } catch(err) { // NoOp } ###Output _____no_output_____
books/implied.ipynb
###Markdown implied countsAdjust counts for time lags and unconfirmed cases. ###Code %load_ext autoreload %autoreload 2 %autosave 0 import etl from pandas import DataFrame FIGSIZE = (9, 3) ###Output The autoreload extension is already loaded. To reload it, use: %reload_ext autoreload ###Markdown realized countsLoad examples from [CoronaWatchNL] via [email protected].[CoronaWatchNL]: https://github.com/J535D165/CoronaWatchNL ###Code real = etl.rivm().loc[:'2020-03-30'] real.plot(figsize=FIGSIZE, logy=True) real[::5] ###Output _____no_output_____ ###Markdown implied exposuresAssumptions:- False-positive rate P(confirmed | not exposed) = 0.- Confirmation rate P(confirmed | exposed) = `cprob`.- Confirmations occur `ctime` days after exposure.**Caution:** If testing is common, then ignoring false positives leads to the [prosecutor's fallacy].[prosecutor's fallacy]: https://en.wikipedia.org/wiki/Prosecutor's_fallacy ###Code cprob = 0.01 ctime = 5 exposed = ( real['confirmed'].rename('exposed') .div(cprob).shift(-ctime, freq=real.index.freq) ) data = real.join(exposed, how='outer').diff() axes = data.plot(figsize=FIGSIZE, grid=True, logy=True, title='new cases') ###Output _____no_output_____ ###Markdown implied fatality ratesAssumptions:- Unconfirmed cases are never fatal.- Deaths occur `dtime` days after exposure.**Caution:** P(deceased | exposed) may be much less than [case fatality rate].[case fatality rate]: https://en.wikipedia.org/wiki/Case_fatality_rate ###Code dtime = ctime + 7 confirmed = real['confirmed'] deceased = real['deceased'] cfr = deceased[-1] / confirmed.iat[-(1 + dtime - ctime)] ifr = deceased[-1] / exposed.iat[-(1 + dtime)] print(f"{cfr.round(3)} deaths per confirmed case") print(f"{ifr.round(3)} deaths per implied exposure") data = DataFrame(real['deceased'].rename('realized')) data['expected'] = cfr * real['confirmed'].shift(dtime - ctime) axes = data.diff().plot(figsize=FIGSIZE, title='new fatalities') ###Output 0.181 deaths per confirmed case 
0.004 deaths per implied exposure
_notebooks/2021-04-23-Matplotlib .ipynb
###Markdown Matplotlib || pltPlotting library and its numerical math extension NumPy. It provides an object oriented API for embedding plots into applications using general purpose GUI toolkits like Tkinter wxPython, QT, or GTK+ ###Code
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np

# Sample data used throughout the notebook.
x = np.arange(0,10)
y=np.arange(11,21)
a=np.arange(40,50)
b=np.arange(50,60)
###Output _____no_output_____ ###Markdown Scatter plot ###Code
plt.scatter(x,y,c='g') # c='g' sets the marker color to green
plt.xlabel('X axis')
plt.ylabel('Y axis')
plt.title('Graph in 2D')
plt.savefig('g1.png')  # save the figure to disk before showing it
plt.show()
###Output _____no_output_____ ###Markdown plt plot ###Code
plt.plot(x,y)
y=x*x
plt.plot(x,y)
# Third argument is a format string: color ('r') plus optional line/marker
# style ('--' dashed, '*-' star markers joined by a solid line).
plt.plot(x,y,'r')
plt.plot(x,y,'r--')
plt.plot(x,y,'r*-')
###Output _____no_output_____ ###Markdown Subplots ###Code
plt.subplot(2,2,1) # 2 rows 2 cols 1 position
plt.plot(x,y,'r')
plt.subplot(2,2,2)
plt.plot(x,y,'g')
plt.subplot(2,2,3)
plt.plot(x,y,'b')

# compute x and y coordinates for points on a sine wave
np.pi  # NOTE(review): stray expression — evaluates np.pi and discards it
x = np.arange(0,4*np.pi,0.1)
y=np.sin(x)
plt.title("sine wave form")
plt.plot(x,y)
plt.show()

#subplot for sin and cos waves
x=np.arange(0,5*np.pi,0.1)
y_sin = np.sin(x)
y_cos = np.cos(x)
plt.subplot(2,1,1)
plt.plot(x,y_sin,'r--')
plt.title("sine graph")
plt.subplot(2,1,2)
plt.plot(x,y_cos,'g--')
plt.title("cosine graph")
plt.show()
###Output _____no_output_____ ###Markdown Bar plot ###Code
x= [2,8,10]
y = [11,16,18]
x2 = [3,9,11]
y2 = [4,7,9]
plt.bar(x,y)
plt.bar(x2,y2,color ='g')
plt.title('Bar graph')
plt.ylabel( 'Yaxis')
plt.xlabel( 'Xaxis')
plt.show()
###Output _____no_output_____ ###Markdown Histograms ###Code
a = np.array([1,2,3,4,5,5,6,67,7,8,8,9])
# y axis shows the count (or density) of values falling into each bin
plt.hist(a)
plt.title('histogram')
plt.show()
###Output _____no_output_____ ###Markdown Box plot ###Code
# box plots visualize the median, quartiles and outliers (percentiles)
# np.random.normal(loc=0, scale=std, size=100): 100 samples each for standard
# deviations 1, 2 and 3 (loc/scale/size — not low/step, which are uniform args)
data = [np.random.normal(0,std,100) for std in range(1,4)]

# rectangular box plot
plt.boxplot(data, vert=True,
            patch_artist= True)

data
###Output _____no_output_____ ###Markdown Pi chart ###Code
labels = 'python','c++', 'ruby', 'java'
sizes = [215,130,245,210]
colors = ['gold', 'yellowgreen','lightcoral', 'lightskyblue']
explode = (0.1,0,0,0) #explode 1st slice

#plot
plt.pie(sizes,explode=explode,labels=labels,colors=colors,
        autopct='%1.1f%%',shadow=True)

plt.axis('equal')
plt.show()
###Output _____no_output_____
notebooks/cbd_for_life.ipynb
###Markdown Web scraper for Fulton and Roark stores Import packages ###Code
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from collections import namedtuple
import csv
###Output _____no_output_____ ###Markdown Connect to a Firefox webdriver using Selenium ###Code
driver = webdriver.Firefox()
driver.get("https://cbdforlife.us/store-locator/")
###Output _____no_output_____ ###Markdown Submit zip code to form, we need to check if there's a pop-up first & close it if so ###Code
submit_button = driver.find_element_by_id('storemapper-go')
# If a modal pop-up intercepts the first click, dismiss it and retry.
# FIX: catch Exception instead of a bare `except:`, which would also swallow
# KeyboardInterrupt/SystemExit.
try:
    submit_button.click()
except Exception:
    popup = driver.find_element_by_id('popup')
    popup_close = popup.find_element_by_class_name('close-modal')
    popup_close.click()
    submit_button.click()
###Output _____no_output_____ ###Markdown Get a list of the stores ###Code
store_list_container = driver.find_element_by_id('storemapper-list')
store_list = store_list_container.find_elements_by_tag_name('li')
###Output _____no_output_____ ###Markdown Create a namedtuple to store the data ###Code
store_info = namedtuple('StoreInfo', 'store_id store_name address phone')
###Output _____no_output_____ ###Markdown Loop through all the stores and get the information for each ###Code
store_data = []
for store in store_list:
    store_id = store.get_attribute('data-idx')
    store_name = store.find_element_by_class_name('storemapper-title').text
    # Some listings omit an address and/or phone number; fall back to a
    # placeholder instead of failing the whole scrape. find_element_* raises
    # NoSuchElementException when the element is absent — catch exactly that.
    try:
        address = store.find_element_by_class_name('storemapper-address').text
    except NoSuchElementException:
        address = "No address found"
    try:
        phone = store.find_element_by_class_name('storemapper-phone').text
    except NoSuchElementException:
        phone = "No phone number found"
    # Use a distinct name for the record so the loop variable isn't shadowed.
    record = store_info(store_id, store_name, address, phone)
    store_data.append(record)
###Output _____no_output_____ ###Markdown Write data to csv file ###Code
# FIX: newline='' is required when passing a file object to csv.writer;
# without it, Windows writes a blank row between every record.
with open('../data/cbd_for_life.csv', 'w', newline='') as f:
    w = csv.writer(f)
    w.writerow(('store_id', 'store_name', 'address', 'phone_number'))
    w.writerows([store.store_id, store.store_name, store.address, store.phone]
                for store in store_data)
###Output _____no_output_____
DigitalBiomarkers-HumanActivityRecognition/10_code/50_deep_learning/53_tensorflow_models/53_tensorflow_Duke_Data/.ipynb_checkpoints/20_ANN_window_feature_engineering_balanced-checkpoint.ipynb
###Markdown Window Feature Classification Model: ANN with Feature Engineering This file is composed of an artifical neural network classification model to evaluate if using features from windows of time (20 seconds with 10 second overlap), would generate a better model than our simple timepoint classifier. Leave-One-Person-Out (LOPO) Cross-Validation is used to validate the model. __INPUT: .csv files containing the rolled sensor data with feature engineering (engineered_features.csv)__ __OUTPUT: Neural Network Multi-Classification Window Featuer Model (F1 Score = 0.871)__ Imports ###Code import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.metrics import plot_confusion_matrix import matplotlib.pyplot as plt from sklearn.metrics import confusion_matrix, accuracy_score, f1_score import seaborn as sns import tensorflow as tf ###Output _____no_output_____ ###Markdown Read in Data The loaded dataset contains windows of data that are 20 seconds long with a 10 second overlap. These are stored as arrays in the dataframe. ###Code pd.set_option('display.max_columns', None) df = pd.read_csv('/Users/N1/Data7/Data-2020/10_code/40_usable_data_for_models/41_Duke_Data/engineered_features.csv') ###Output _____no_output_____ ###Markdown We add a window number that changes everytime there is a new activity present, as we wish to use this as a feature. ###Code df = df.assign(count=df.groupby(df.Activity.ne(df.Activity.shift()).cumsum()).cumcount().add(1)) df.head(5) ###Output _____no_output_____ ###Markdown Label Encode Activity and Subject_ID We encode the y variable as we need to one-hot encode this y variable for the model. The label each class is associated with is printed below. 
###Code
# Integer-encode the class labels (required before one-hot encoding later)
# and the subject identifiers.
from sklearn.preprocessing import LabelEncoder

le1 = LabelEncoder()
df['Activity'] = le1.fit_transform(df['Activity'])
# Mapping from original activity name -> integer label, printed for reference.
activity_name_mapping = dict(zip(le1.classes_, le1.transform(le1.classes_)))
print(activity_name_mapping)

le = LabelEncoder()
df['Subject_ID'] = le.fit_transform(df['Subject_ID'])
###Output _____no_output_____ ###Markdown Create Test Train split ###Code
# Fixed seed so the same 3 held-out subjects are drawn on every run.
np.random.seed(29)
rands = np.random.choice(df.Subject_ID.unique(), 3, replace=False)
print(f' These will be our Subjects in our test set: {rands}')
###Output These will be our Subjects in our test set: [39 17 45] ###Markdown Split Subjects into Test and Train Sets (n=52, 3) ###Code
test = df[df['Subject_ID'].isin(rands)]
# BUG FIX: boolean masks must be inverted with `~`, not unary `-`; negating a
# boolean Series with `-` is unsupported in modern pandas/NumPy and raises a
# TypeError.
train = df[~df['Subject_ID'].isin(rands)]
###Output _____no_output_____ ###Markdown Feature Selection Choose features to be used in model Pick one of the three following code cells to choose what features are used in the model. Do not run them all. To uncomment or comment multiple selected lines, press control + /. 
All Features ###Code # train = train[['ACC1_mean', 'ACC2_mean', 'ACC3_mean', 'TEMP_mean', 'EDA_mean', 'BVP_mean', 'HR_mean', 'Magnitude_mean', 'ACC1_std', 'ACC2_std', # 'ACC3_std', 'TEMP_std', 'EDA_std', 'BVP_std', 'HR_std', 'Magnitude_std', 'ACC1_min', 'ACC2_min', 'ACC3_min', 'TEMP_min', 'EDA_min', 'BVP_min', 'HR_min', 'Magnitude_min', # 'ACC1_max', 'ACC2_max', 'ACC3_max', 'TEMP_max', 'EDA_max', 'BVP_max', 'HR_max', 'Magnitude_max', 'Subject_ID', 'count', 'Activity']] # test = test [['ACC1_mean', 'ACC2_mean', 'ACC3_mean', 'TEMP_mean', 'EDA_mean', 'BVP_mean', 'HR_mean', 'Magnitude_mean', 'ACC1_std', 'ACC2_std', # 'ACC3_std', 'TEMP_std', 'EDA_std', 'BVP_std', 'HR_std', 'Magnitude_std', 'ACC1_min', 'ACC2_min', 'ACC3_min', 'TEMP_min', 'EDA_min', 'BVP_min', 'HR_min', 'Magnitude_min', # 'ACC1_max', 'ACC2_max', 'ACC3_max', 'TEMP_max', 'EDA_max', 'BVP_max', 'HR_max', 'Magnitude_max', 'Subject_ID', 'count', 'Activity']] ###Output _____no_output_____ ###Markdown Mechanical Features ###Code # train = train[['ACC1_mean', 'ACC2_mean', 'ACC3_mean', 'Magnitude_mean', 'ACC1_std', 'ACC2_std', 'ACC3_std', 'Magnitude_std', # 'ACC1_min', 'ACC2_min', 'ACC3_min', 'Magnitude_min', # 'ACC1_max', 'ACC2_max', 'ACC3_max', 'Magnitude_max', 'Subject_ID', 'count', 'Activity']] # test = test[['ACC1_mean', 'ACC2_mean', 'ACC3_mean', 'Magnitude_mean', 'ACC1_std', 'ACC2_std', 'ACC3_std', 'Magnitude_std', # 'ACC1_min', 'ACC2_min', 'ACC3_min', 'Magnitude_min', # 'ACC1_max', 'ACC2_max', 'ACC3_max', 'Magnitude_max', 'Subject_ID', 'count', 'Activity']] ###Output _____no_output_____ ###Markdown Physiological Features ###Code train = train[['TEMP_mean', 'EDA_mean', 'BVP_mean', 'HR_mean', 'TEMP_std', 'EDA_std', 'BVP_std', 'HR_std', 'TEMP_min', 'EDA_min', 'BVP_min', 'HR_min', 'TEMP_max', 'EDA_max', 'BVP_max', 'HR_max', 'Subject_ID', 'count', 'Activity']] test = test[['TEMP_mean', 'EDA_mean', 'BVP_mean', 'HR_mean', 'TEMP_std', 'EDA_std', 'BVP_std', 'HR_std', 'TEMP_min', 'EDA_min', 'BVP_min', 'HR_min', 
'TEMP_max', 'EDA_max', 'BVP_max', 'HR_max', 'Subject_ID', 'count', 'Activity']] ###Output _____no_output_____ ###Markdown Balancing Classes In the following code cells, we randomly sample data from our majority classes to balance our dataset. ###Code #{'Activity': 0, 'Baseline': 1, 'DB': 2, 'Type': 3} train['Activity'].value_counts() zero = train[train['Activity'] == 0] one = train[train['Activity'] == 1] two = train[train['Activity'] == 2] three =train[train['Activity'] == 3] zero = zero.sample(505) one = one.sample(505) train = pd.concat([zero, one, two, three]) train['Activity'].value_counts() ###Output _____no_output_____ ###Markdown This train_SID is made so we can use the Subject_ID values to perform LOPO (leave one person out) later on. ###Code train_SID = train['Subject_ID'].values ###Output _____no_output_____ ###Markdown Apply one-hot encoding to Subject ID and window count Subject_ID and window count must be one-hot encoded to be used as features in our model. Test and train dataframes must be concatenated before we one-hot encode, so that we do not get different encodings for each data set. ###Code train['train'] =1 test['train'] = 0 combined = pd.concat([train, test]) combined = pd.concat([combined, pd.get_dummies(combined['Subject_ID'], prefix = 'SID')], axis =1).drop('Subject_ID', axis =1) combined = pd.concat([combined, pd.get_dummies(combined['count'], prefix = 'count')], axis =1).drop('count', axis = 1) train = combined[combined['train'] == 1] test = combined[combined['train'] == 0] train.drop(["train"], axis = 1, inplace = True) test.drop(["train"], axis = 1, inplace = True) print(train.shape, test.shape) ###Output (2020, 130) (310, 130) ###Markdown We remove activity from our train and test datasets as this is the y variable (target variable) and we are only interested in keeping the features. 
###Code train_f = train.drop("Activity", axis =1) test_f = test.drop("Activity", axis =1) ###Output _____no_output_____ ###Markdown Define X (features) and y (targets) ###Code X_train = train_f y_train = train.Activity X_test = test_f y_test = test.Activity ###Output _____no_output_____ ###Markdown Standardize Data Scaling is used to change values without distorting differences in the range of values for each sensor. We do this because different sensor values are not in similar ranges of each other and if we did not scale the data, gradients may oscillate back and forth and take a long time before finding the local minimum. It may not be necessary for this data, but to be sure, we normalized the features.The standard score of a sample x is calculated as:$$z = \frac{x-u}{s}$$Where u is the mean of the data, and s is the standard deviation of the data of a single sample. The scaling is fit on the training set and applied to both the training and test set. ###Code sc = StandardScaler() X_train.iloc[:,:16] = sc.fit_transform(X_train.iloc[:,:16]) X_test.iloc[:,:16] = sc.transform(X_test.iloc[:,:16]) X_train X_train = X_train.values X_test = X_test.values from keras.utils import np_utils y_train_dummy = np_utils.to_categorical(y_train) y_test_dummy = np_utils.to_categorical(y_test) ###Output _____no_output_____ ###Markdown Neural Network- 6 hidden **fully connected** layers with 32 nodes- The **Dropout** layer randomly sets input units to 0 with a frequency of rate at each step during training time, which helps prevent overfitting.- **Softmax** acitvation function - Used to generate probabilities for each class as an output in the final fully connected layer of the model We decided to use ADAM as our optimizer as it is computationally efficient and updates the learning rate on a per-parameter basis, based on a moving estimate per-parameter gradient, and the per-parameter squared gradient. 
###Code
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
###Output
_____no_output_____
###Markdown
LOOCV

__Leave One Out CV:__ Each observation is considered as a validation set and the remaining n-1 observations form the training set. Fit the model and predict using the single-observation validation set. Repeat this n times, once with each observation as the validation set. The test-error rate is the average of all n errors.

__Advantages:__ takes care of both drawbacks of the validation-set method

1. No randomness in which observations are used for training vs. validation (unlike the validation-set method), since each observation is considered for both training and validation. So overall there is less variability than with the validation-set method, no matter how many times you run it.
2. Less bias than the validation-set method, as the training set is of size n-1. Because of this reduced bias, the test error is over-estimated less than with the validation-set method.

__Disadvantages:__

1. Even though each iteration's test error is unbiased, it has high variability, as only a one-observation validation set was used for prediction.
2. Computationally expensive (time and power), especially if the dataset is big with large n, as it requires fitting the model n times. Also, some statistical models have computationally intensive fitting, so with a large dataset and these models LOOCV might not be a good choice.
###Code from sklearn.model_selection import LeaveOneGroupOut # Lists to store metrics acc_per_fold = [] loss_per_fold = [] f1_per_fold = [] # Define the K-fold Cross Validator groups = train_SID inputs = X_train targets = y_train_dummy logo = LeaveOneGroupOut() logo.get_n_splits(inputs, targets, groups) cv = logo.split(inputs, targets, groups) # LOGO fold_no = 1 for train, test in cv: #Define the model architecture model = Sequential() model.add(Dense(256, activation='relu')) model.add(Dense(256, activation='relu')) model.add(Dense(256, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(256, activation='relu')) model.add(Dense(256, activation='relu')) model.add(Dense(4, activation='softmax')) #4 outputs are possible model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) # Generate a print print('------------------------------------------------------------------------') print(f'Training for fold {fold_no} ...') # Fit data to model history = model.fit(inputs[train], targets[train], batch_size=32, epochs=10, verbose=1) # Generate generalization metrics scores = model.evaluate(inputs[test], targets[test], verbose=0) y_pred = np.argmax(model.predict(inputs[test]), axis=-1) f1 = (f1_score(np.argmax(targets[test], axis=1), (y_pred), average = 'weighted')) print(f'Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; {model.metrics_names[1]} of {scores[1]*100}%, F1 of {f1}') f1_per_fold.append(f1) acc_per_fold.append(scores[1] * 100) loss_per_fold.append(scores[0]) # Increase fold number fold_no = fold_no + 1 # == Provide average scores == print('------------------------------------------------------------------------') print('Score per fold') for i in range(0, len(acc_per_fold)): print('------------------------------------------------------------------------') print(f'> Fold {i+1} - Loss: {loss_per_fold[i]} - Accuracy: {acc_per_fold[i]}% - F1:{f1_per_fold[i]}%') 
print('------------------------------------------------------------------------') print('Average scores for all folds:') print(f'> Accuracy: {np.mean(acc_per_fold)} (+- {np.std(acc_per_fold)})') print(f'> F1: {np.mean(f1_per_fold)} (+- {np.std(f1_per_fold)})') print(f'> Loss: {np.mean(loss_per_fold)}') print('------------------------------------------------------------------------') ###Output ------------------------------------------------------------------------ Training for fold 1 ... Epoch 1/10 62/62 [==============================] - 0s 2ms/step - loss: 1.2943 - accuracy: 0.3921 Epoch 2/10 62/62 [==============================] - 0s 2ms/step - loss: 1.0806 - accuracy: 0.5488 Epoch 3/10 62/62 [==============================] - 0s 2ms/step - loss: 0.7433 - accuracy: 0.6983 Epoch 4/10 62/62 [==============================] - 0s 3ms/step - loss: 0.6003 - accuracy: 0.7736 Epoch 5/10 62/62 [==============================] - 0s 3ms/step - loss: 0.5082 - accuracy: 0.8140 Epoch 6/10 62/62 [==============================] - 0s 2ms/step - loss: 0.4101 - accuracy: 0.8474 Epoch 7/10 62/62 [==============================] - 0s 2ms/step - loss: 0.3453 - accuracy: 0.8762 Epoch 8/10 62/62 [==============================] - 0s 3ms/step - loss: 0.3055 - accuracy: 0.8853 Epoch 9/10 62/62 [==============================] - 0s 2ms/step - loss: 0.2709 - accuracy: 0.8994 Epoch 10/10 62/62 [==============================] - 0s 3ms/step - loss: 0.2085 - accuracy: 0.9277 Score for fold 1: loss of 0.4869212210178375; accuracy of 85.36585569381714%, F1 of 0.8534713991641194 ------------------------------------------------------------------------ Training for fold 2 ... 
Epoch 1/10 62/62 [==============================] - 0s 2ms/step - loss: 1.2803 - accuracy: 0.4075 Epoch 2/10 62/62 [==============================] - 0s 2ms/step - loss: 0.9858 - accuracy: 0.5971 Epoch 3/10 62/62 [==============================] - 0s 2ms/step - loss: 0.6914 - accuracy: 0.7290 Epoch 4/10 62/62 [==============================] - 0s 2ms/step - loss: 0.5834 - accuracy: 0.7765 Epoch 5/10 62/62 [==============================] - 0s 2ms/step - loss: 0.4761 - accuracy: 0.8170 Epoch 6/10 62/62 [==============================] - 0s 2ms/step - loss: 0.3916 - accuracy: 0.8519 Epoch 7/10 62/62 [==============================] - 0s 3ms/step - loss: 0.3082 - accuracy: 0.8868 Epoch 8/10 62/62 [==============================] - 0s 3ms/step - loss: 0.2462 - accuracy: 0.9166 Epoch 9/10 62/62 [==============================] - 0s 2ms/step - loss: 0.2660 - accuracy: 0.9090 Epoch 10/10 62/62 [==============================] - 0s 2ms/step - loss: 0.1853 - accuracy: 0.9312 Score for fold 2: loss of 2.1405515670776367; accuracy of 66.66666865348816%, F1 of 0.62548945307566 ------------------------------------------------------------------------ Training for fold 3 ... 
Epoch 1/10 62/62 [==============================] - 0s 2ms/step - loss: 1.2845 - accuracy: 0.4016 Epoch 2/10 62/62 [==============================] - 0s 2ms/step - loss: 0.9895 - accuracy: 0.5928 Epoch 3/10 62/62 [==============================] - 0s 2ms/step - loss: 0.7105 - accuracy: 0.7245 Epoch 4/10 62/62 [==============================] - 0s 2ms/step - loss: 0.5786 - accuracy: 0.7770 Epoch 5/10 62/62 [==============================] - 0s 2ms/step - loss: 0.4698 - accuracy: 0.8280 Epoch 6/10 62/62 [==============================] - 0s 2ms/step - loss: 0.3971 - accuracy: 0.8587 Epoch 7/10 62/62 [==============================] - 0s 2ms/step - loss: 0.3694 - accuracy: 0.8673 Epoch 8/10 62/62 [==============================] - 0s 2ms/step - loss: 0.2914 - accuracy: 0.8961 Epoch 9/10 62/62 [==============================] - 0s 2ms/step - loss: 0.2447 - accuracy: 0.9117 Epoch 10/10 62/62 [==============================] - 0s 2ms/step - loss: 0.2243 - accuracy: 0.9228 Score for fold 3: loss of 1.298040747642517; accuracy of 63.15789222717285%, F1 of 0.640759507464313 ------------------------------------------------------------------------ Training for fold 4 ... 
Epoch 1/10 62/62 [==============================] - 0s 2ms/step - loss: 1.2915 - accuracy: 0.3963 Epoch 2/10 62/62 [==============================] - 0s 3ms/step - loss: 1.0107 - accuracy: 0.5870 Epoch 3/10 62/62 [==============================] - 0s 2ms/step - loss: 0.7304 - accuracy: 0.7298 Epoch 4/10 62/62 [==============================] - 0s 3ms/step - loss: 0.5906 - accuracy: 0.7788 Epoch 5/10 62/62 [==============================] - 0s 2ms/step - loss: 0.4891 - accuracy: 0.8112 Epoch 6/10 62/62 [==============================] - 0s 2ms/step - loss: 0.3915 - accuracy: 0.8512 Epoch 7/10 62/62 [==============================] - 0s 3ms/step - loss: 0.3385 - accuracy: 0.8725 Epoch 8/10 62/62 [==============================] - 0s 3ms/step - loss: 0.2940 - accuracy: 0.9013 Epoch 9/10 62/62 [==============================] - 0s 2ms/step - loss: 0.2400 - accuracy: 0.9109 Epoch 10/10 62/62 [==============================] - 0s 2ms/step - loss: 0.2347 - accuracy: 0.9185 Score for fold 4: loss of 1.9513981342315674; accuracy of 56.81818127632141%, F1 of 0.5692974063385632 ------------------------------------------------------------------------ Training for fold 5 ... 
Epoch 1/10 62/62 [==============================] - 0s 2ms/step - loss: 1.2842 - accuracy: 0.3982 Epoch 2/10 62/62 [==============================] - 0s 2ms/step - loss: 1.0430 - accuracy: 0.5643 Epoch 3/10 62/62 [==============================] - 0s 3ms/step - loss: 0.7009 - accuracy: 0.7204 Epoch 4/10 62/62 [==============================] - 0s 2ms/step - loss: 0.5586 - accuracy: 0.7888 Epoch 5/10 62/62 [==============================] - 0s 3ms/step - loss: 0.4598 - accuracy: 0.8247 Epoch 6/10 62/62 [==============================] - 0s 2ms/step - loss: 0.4015 - accuracy: 0.8465 Epoch 7/10 62/62 [==============================] - 0s 2ms/step - loss: 0.3041 - accuracy: 0.8880 Epoch 8/10 62/62 [==============================] - 0s 2ms/step - loss: 0.2761 - accuracy: 0.9043 Epoch 9/10 62/62 [==============================] - 0s 2ms/step - loss: 0.2274 - accuracy: 0.9245 Epoch 10/10 62/62 [==============================] - 0s 3ms/step - loss: 0.2438 - accuracy: 0.9179 Score for fold 5: loss of 2.1546151638031006; accuracy of 52.173912525177%, F1 of 0.5015007875419775 ------------------------------------------------------------------------ Training for fold 6 ... 
Epoch 1/10 62/62 [==============================] - 0s 2ms/step - loss: 1.2819 - accuracy: 0.3965 Epoch 2/10 62/62 [==============================] - 0s 2ms/step - loss: 1.0358 - accuracy: 0.5742 Epoch 3/10 62/62 [==============================] - 0s 3ms/step - loss: 0.7073 - accuracy: 0.7227 Epoch 4/10 62/62 [==============================] - 0s 3ms/step - loss: 0.5945 - accuracy: 0.7707 Epoch 5/10 62/62 [==============================] - 0s 2ms/step - loss: 0.4975 - accuracy: 0.8141 Epoch 6/10 62/62 [==============================] - 0s 2ms/step - loss: 0.4049 - accuracy: 0.8449 Epoch 7/10 62/62 [==============================] - 0s 3ms/step - loss: 0.3226 - accuracy: 0.8828 Epoch 8/10 62/62 [==============================] - 0s 2ms/step - loss: 0.2843 - accuracy: 0.9000 Epoch 9/10 62/62 [==============================] - 0s 3ms/step - loss: 0.2398 - accuracy: 0.9152 Epoch 10/10 62/62 [==============================] - 0s 2ms/step - loss: 0.1969 - accuracy: 0.9318 Score for fold 6: loss of 1.3011457920074463; accuracy of 60.00000238418579%, F1 of 0.5218390804597701 ------------------------------------------------------------------------ Training for fold 7 ... 
Epoch 1/10 62/62 [==============================] - 0s 3ms/step - loss: 1.2874 - accuracy: 0.4134 Epoch 2/10 62/62 [==============================] - 0s 3ms/step - loss: 1.0345 - accuracy: 0.5634 Epoch 3/10 62/62 [==============================] - 0s 3ms/step - loss: 0.7292 - accuracy: 0.7234 Epoch 4/10 62/62 [==============================] - 0s 2ms/step - loss: 0.5937 - accuracy: 0.7739 Epoch 5/10 62/62 [==============================] - 0s 2ms/step - loss: 0.4759 - accuracy: 0.8294 Epoch 6/10 62/62 [==============================] - 0s 3ms/step - loss: 0.4015 - accuracy: 0.8546 Epoch 7/10 62/62 [==============================] - 0s 3ms/step - loss: 0.3379 - accuracy: 0.8758 Epoch 8/10 62/62 [==============================] - 0s 3ms/step - loss: 0.3177 - accuracy: 0.8935 Epoch 9/10 62/62 [==============================] - 0s 2ms/step - loss: 0.2565 - accuracy: 0.9086 Epoch 10/10 62/62 [==============================] - 0s 2ms/step - loss: 0.2186 - accuracy: 0.9202 Score for fold 7: loss of 1.9404150247573853; accuracy of 53.84615659713745%, F1 of 0.5082417582417582 ------------------------------------------------------------------------ Training for fold 8 ... Epoch 1/10 ###Markdown Please edit the name of the model below. This will be used to save the model and figures associated with the model. ###Code model_name = '20_TF_FE_balanced_Phys_Only' !mkdir -p saved_model model.save(f'saved_model/{model_name}') ###Output INFO:tensorflow:Assets written to: saved_model/20_TF_FE_balanced_Phys_Only/assets ###Markdown Prediction We obtain the predicted class for each test set sample by using the argmax function on the predicted probabilities that are output from our model. Argmax returns the class with the highest probability. 
###Code model = tf.keras.models.load_model(f'saved_model/{model_name}') y_pred = np.argmax(model.predict(X_test), axis=-1) results = model.evaluate(X_test, y_test_dummy, batch_size=32) print("Test loss, Test acc:", results) ###Output 10/10 [==============================] - 0s 1ms/step - loss: 2.6477 - accuracy: 0.4000 Test loss, Test acc: [2.6476845741271973, 0.4000000059604645] ###Markdown A **confusion matrix** is generated to observe where the model is classifying well and to see classes which the model is not classifying well. ###Code cm = confusion_matrix(y_test,y_pred) cm ###Output _____no_output_____ ###Markdown We normalize the confusion matrix to better understand the proportions of classes classified correctly and incorrectly for this model. ###Code cm= cm.astype('float')/cm.sum(axis=1)[:,np.newaxis] cm ax = plt.subplot() sns.heatmap(cm, annot = True, fmt = '.2f',cmap = 'Blues', xticklabels = le1.classes_, yticklabels = le1.classes_) ax.set_xlabel("Predicted labels") ax.set_ylabel('Actual labels') plt.title('Feature Engineered STEP balanced - Confusion Matrix') plt.savefig(f'20_figures/{model_name}_CF.png') ###Output _____no_output_____ ###Markdown The **accuracy** score represents the proportion of correct classifications over all classifications. The **F1 score** is a composite metric of two other metrics:Specificity: proportion of correct 'positive predictions' over all 'positive' predictions.Sensitivity: number of correct 'negative' predictions over all 'negative' predictions.The F1 score gives insight as to whether all classes are predicted correctly at the same rate. A low F1 score and high accuracy can indicate that only a majority class is predicted. ###Code a_s = accuracy_score(y_test, y_pred) f1_s = f1_score(y_test, y_pred, average = 'weighted') print(f'Accuracy Score: {a_s:.3f} \nF1 Score: {f1_s:.3f}') ###Output Accuracy Score: 0.400 F1 Score: 0.377
python/ch6/6.7.1-model-parameter-estimation.ipynb
###Markdown
Let us suppose that we know that the height of an adult resident in Statsville lies between 160 cm and 170 cm. We want to predict if this resident is female. For this purpose, we have collected a set of height samples from adult female residents in Statsville. This becomes our training data. From physical considerations, we can assume that the distribution of heights is Gaussian. Our goal is to estimate the parameters ($\mu$, $\sigma$) of this Gaussian. In this notebook, we will study two ways of doing this: (1) Maximum Likelihood Estimation and (2) Maximum A Posteriori Estimation.

Let us first create the dataset $X$ by sampling 10000 points from a Gaussian distribution with $\mu$=152 and $\sigma$=8. In real life scenarios, we do not know the mean and standard deviation of the true distribution. But for the sake of this example, let's assume that the mean height is 152 cm and the standard deviation is 8 cm.
###Code
torch.random.manual_seed(42)

num_samples = 10000
mu = 152
sigma = 8
X = torch.normal(mu, sigma, size=(num_samples, 1))
print('Dataset shape: {}'.format(X.shape))
###Output
Dataset shape: torch.Size([10000, 1])
###Markdown
Maximum Likelihood Estimate (MLE)

In MLE, we try to find the parameters that "best explain" our data. In other words, we try to find the parameters that maximize the joint likelihood of our training data instances. Let's say our model is parameterised by $\theta$. The likelihood function $p(X|\theta)$ shows how likely the sample distribution $X$ is for different values of $\theta$. With MLE, our goal is to find the parameters $\theta$ that maximise $p(X|\theta)$.
We can assume that our model parameterises a Gaussian distribution $N(\mu, \sigma)$The likelihood function can be written as $$ p(X|\theta) = N(X|\mu,\sigma) = \prod_{i=1}^{n}N(x_i|\mu,\sigma) = \Bigl(\frac{1}{2\pi\sigma^2}\Bigl)^\frac{N}{2}exp\Bigl(\frac{-1}{2\sigma^2}\sum_{i=1}^N(x_i - \mu)^2\Bigl) $$Maximising the likelihood function yields $$\mu_{MLE} = \frac{1}{N}\sum_{i=1}^nx_i$$$$\sigma_{MLE} = \frac{1}{N}\sum_{i=1}^n(x_i - \mu)^2$$In practice, we maximise the logarithm of the likelihood because it makes for much easier calculations when dealing with exponential functions. Refer to the section 6.8 in the book for a detailed derivationThus by computing the sample mean and the sample standard deviation, we can find the parameters of the best fit Gaussian for the dataset. Once we estimate the parameters, we can find out the probability that a sample lies in the range using the following formula$$ p(a < X <= b) = \int_{a}^b p(X) dX $$ ###Code # Let us compute the mean and standard deviation of the sampled points. sample_mean = X.mean() sample_std = X.std() print('Sample mean: {}'.format(sample_mean)) print('Sample standard deviation: {}'.format(sample_std)) # As expected, the sample mean and sample standard deviation are close to the corresponding values # of the Normal distribution that the points were sampled from gaussian_mle = Normal(sample_mean, sample_std) # We want to find out the probability that a height between 160 and 170 belongs to an adult female resident a, b = torch.Tensor([160]), torch.Tensor([170]) prob = gaussian_mle.cdf(b) - gaussian_mle.cdf(a) print('Prob: {}'.format(prob)) a, b = torch.Tensor([160]), torch.Tensor([170]) prob = gaussian_mle.cdf(b) - gaussian_mle.cdf(a) ###Output _____no_output_____ ###Markdown Maximum Likelihood Estimate using Gradient DescentAbove, we were able to estimate the parameters using the closed form solution. Now, let us try to arrive at these parameters iteratively using gradient descent. 
In real-life scenarios, we don't use gradient descent because the closed form solution is available. But we discuss the gradient descent based approach to highlight some of the challenges.Our objective is to find the parameters $\theta$ that maximise the likelihood function $p(X|\theta)$. We choose to maximise the log of the likelihood function since it is more mathematically convenient. This can alternatively viewed as minimising the negative log-likelihood function.$$ -\log p(X|\theta) = \frac{N}{2}\log(2\pi\sigma^2) + \frac{1}{2\sigma^2}\sum_{i=1}^n(x_i - \mu)^2$$The optimisation process is as follows: 1. Initialise the model parameters, $\mu$ and $\sigma$ with random values 2. Compute the loss value (negative log-likelihood) 3. Find the gradients of the loss w.r.t the model parameters 4. Update the model parameters in the opposite direction of the gradient values 5. Repeat steps 1-4 until loss diminishes to a small value ###Code import torch from torch.autograd import Variable dtype = torch.FloatTensor # Negative log likelihood function defined above def neg_log_likelihood(X, mu, sigma): N = X.shape[0] X_minus_mu = torch.sub(X, mu) return torch.mul(0.5 * N, torch.log(2 * np.pi * torch.pow(sigma, 2))) + \ torch.div(torch.matmul(X_minus_mu.T, X_minus_mu), 2 * torch.pow(sigma, 2)) # Gradient descent to estimate the parameters def optimise(X, mu, sigma, loss_fn, num_iters=100, lr = 0.001): X = torch.Tensor(X) # Convert the data to a torch tensor iters, losses, mus, sigmas = [], [], [], [] for i in range(num_iters): loss = loss_fn(X, mu, sigma) if i % (num_iters / 10) == 0: print('iter: {}, loss: {}, mu: {}, sigma: {}'.format(i, loss[0][0] / num_samples, mu.data[0], sigma.data[0])) iters.append(i) losses.append(loss[0][0] / num_samples) mus.append(float(mu.data)) sigmas.append(float(sigma.data)) # We don't explicitly compute the gradients ourselves. We rely on torch to automatically # compute the gradients. The gradients are stored in <param>.grad. 
loss.backward() # We scale the gradients by the learning rate before update mu.data -= lr * mu.grad sigma.data -= lr * sigma.grad # We zero out the gradients before every update. Otherwise gradients from previous iterations get accumulated mu.grad.data.zero_() sigma.grad.data.zero_() return iters, losses, mus, sigmas def plot_mle_fit(iters, mus, sigmas, expected_mu=152, expected_sigma=8, mu_text_y=160, sigma_text_y=100): plt.figure(figsize=(4, 4)) ax1 = plt.subplot(2, 1, 1) ax1.plot(iters, mus) ax1.plot([iters[0], iters[-1]], [expected_mu, expected_mu], color='green') ax1.text(iters[int(len(iters) / 2)], mu_text_y, r'Expected $\mu={}$'.format(expected_mu)) ax1.set_xlabel('Iteration') ax1.set_ylabel('Mean (cm)') ax1.grid(True) ax2 = plt.subplot(2, 1, 2) ax2.plot(iters, sigmas) ax2.plot([iters[0], iters[-1]], [expected_sigma, expected_sigma], color='green') ax2.text(iters[int(len(iters) / 2)], sigma_text_y, r'Expected $\sigma={}$'.format(expected_sigma)) ax2.set_xlabel('Iteration') ax2.set_ylabel('Sigma (cm)') ax2.grid(True) plt.show() # We define our model params mu and sigma as torch Variables. # Note that requires_grad has been set to true - this tells PyTorch that gradients are # required to be computed for these variables. We randomly initialise both mu and sigma. mu = Variable(torch.Tensor([1]).type(dtype), requires_grad=True) sigma = Variable(torch.Tensor([1]).type(dtype), requires_grad=True) iters, losses, mus, sigmas = optimise(X, mu, sigma, neg_log_likelihood, num_iters=1000, lr=0.001) plot_mle_fit(iters, mus, sigmas, mu_text_y=200, sigma_text_y=10000) ###Output _____no_output_____ ###Markdown What just happened? The estimated mean and standard deviation are nowhere close to our expected values of 152 and 50, but are instead very large numbers.Let's try to tweak things a bit. We had randomly initialised mu and sigma with a value of 5. Now, let us initialise them somewhere in the neighborhood of our expected values. Let's say mu = 100 and sigma is 10. 
###Code # Let us initialise mu to 100 and sigma to 10 mu = Variable(torch.Tensor([100]).type(dtype), requires_grad=True) sigma = Variable(torch.Tensor([10]).type(dtype), requires_grad=True) iters, losses, mus, sigmas = optimise(X, mu, sigma, neg_log_likelihood, num_iters=500) ###Output iter: 0, loss: 17.09115219116211, mu: 100.0, sigma: 10.0 iter: 50, loss: 4.972278594970703, mu: 118.44664001464844, sigma: 38.49307632446289 iter: 100, loss: 4.69744873046875, mu: 129.0364227294922, sigma: 33.65632247924805 iter: 150, loss: 4.295865058898926, mu: 139.37002563476562, sigma: 24.12705421447754 iter: 200, loss: 3.511776924133301, mu: 151.1002960205078, sigma: 8.47098159790039 iter: 250, loss: 3.502779722213745, mu: 152.05128479003906, sigma: 8.035272598266602 iter: 300, loss: 3.502779722213745, mu: 152.0514678955078, sigma: 8.035273551940918 iter: 350, loss: 3.502779722213745, mu: 152.0514678955078, sigma: 8.035273551940918 iter: 400, loss: 3.502779722213745, mu: 152.0514678955078, sigma: 8.035273551940918 iter: 450, loss: 3.502779722213745, mu: 152.0514678955078, sigma: 8.035273551940918 ###Markdown Our model has converged! The loss has decreased and the estimated $\mu$ and $\sigma$ are close to 152 and 50 respectively. The initial values of $\mu$ and $\sigma$ played a crucial role in helping the model converge. We were lucky this time because we knew what to expect for $\mu$ and $\sigma$. However, this is typically not the case in real world scenarios. Is there a better way to solve this problem? This is where Maximum A Posterior (MAP) estimation comes into play. 
###Code plot_mle_fit(iters, mus, sigmas, mu_text_y=145, sigma_text_y=10) ###Output _____no_output_____ ###Markdown Maximum A Posteriori (MAP) EstimationInstead of maximizing $p(X|\theta)$, we can directly maximize $p(\theta|X)$ -> the probability of the parameters given the data instances.Using bayes theorem, $$p(\theta|X) = \frac{p(X|\theta)p(\theta)}{p(X)}$$Maximizing $p(\theta|X)$ is equivalent to maximizing the numerator of the above expression because the denominator is independent of $\theta$. $p(X|\theta)$ is what we maximized in MLE. We need to estimate $p(\theta)$, which is also called the prior probability. A popular approach is to say that we want the parameters to be as small as possible. Hence, we assume that $p(\theta)$ is proportional to $e^{-\theta^2}$. Refer to the chapter in the book for details. Maximizing $p(\theta|X)$ is equivalent to minimizing $-\log p(\theta|X)$$$ -\log p(\theta|X) = -\log p(X|\theta) - \log p(\theta) = \frac{N}{2}\log(2\pi\sigma^2) + \frac{1}{2\sigma^2}\sum_{i=1}^n(x_i - \mu)^2 + \mu^2 + \sigma ^ 2 $$This is the same function as the negative log-likelihood discussed in MLE with two additional terms, $\mu^2$ and $\sigma^2$. They act as regularizers and prevent the parameters from exploding by penalizing large values of $\mu$ and $\sigma$. ###Code # Here we add two additional parameters to the loss function namely mu^2 and sigma^2 # These parameter terms act as regularizers that penalise large values of mu and sigma. 
def neg_log_likelihood_regularized(X, mu, sigma, k=0.2):
    """
    Negative log-likelihood of a Gaussian plus an L2 penalty on the
    parameters (the MAP objective derived above).

    k is the regularization factor that controls the weight of the regularization loss
    """
    N = X.shape[0]
    X_minus_mu = torch.sub(X, mu)
    # Gaussian negative log-likelihood:
    #   (N/2) * log(2*pi*sigma^2) + sum_i (x_i - mu)^2 / (2*sigma^2)
    # X_minus_mu.T @ X_minus_mu computes the sum of squared deviations.
    loss_likelihood = torch.mul(0.5 * N, torch.log(2 * np.pi * torch.pow(sigma, 2))) + \
        torch.div(torch.matmul(X_minus_mu.T, X_minus_mu), 2 * torch.pow(sigma, 2))
    # L2 penalty (mu^2 + sigma^2): the -log p(theta) prior term, which keeps
    # the parameters from exploding even with a poor initialisation.
    loss_reg = k * (torch.pow(mu, 2) + torch.pow(sigma, 2))
    return loss_likelihood + loss_reg

# Let us run the optimiser with the regularized (MAP) log-likelihood function.
# Note: mu and sigma are initialised far from the true values on purpose,
# to show that the regularized loss still converges.
mu = Variable(torch.Tensor([1]).type(dtype), requires_grad=True)
sigma = Variable(torch.Tensor([1]).type(dtype), requires_grad=True)
iters, losses, mus, sigmas = optimise(X, mu, sigma, loss_fn=neg_log_likelihood_regularized, num_iters=30000, lr=0.001)
plot_mle_fit(iters, mus, sigmas, mu_text_y=180, sigma_text_y=3000)
###Output
_____no_output_____
Linked List/0902/143. Reorder List.ipynb
###Markdown ่ฏดๆ˜Ž๏ผš ็ป™ๅฎšไธ€ไธชๅ•้“พๅˆ—่กจL๏ผšL0โ†’L1โ†’โ€ฆโ†’Ln-1โ†’Ln๏ผŒๅฐ†ๅ…ถ้‡ๆ–ฐๆŽ’ๅบไธบ๏ผšL0โ†’Lnโ†’L1โ†’Ln-1โ†’L2โ†’Ln-2โ†’โ€ฆใ€‚ ๆ‚จๅฏ่ƒฝๆ— ๆณ•ไฟฎๆ”นๅˆ—่กจ่Š‚็‚นไธญ็š„ๅ€ผ๏ผŒๅช่ƒฝๆ›ดๆ”น่Š‚็‚นๆœฌ่บซใ€‚Example 1: Given 1->2->3->4, reorder it to 1->4->2->3.Example 2: Given 1->2->3->4->5, reorder it to 1->5->2->4->3. ###Code class ListNode: def __init__(self, val=0, next=None): self.val = val self.next = next class Solution: def reorderList(self, head: ListNode) -> None: """ Do not return anything, modify head in-place instead. """ # ---------------------------------------------- # Save linked list in array arr = [] cur, length = head, 0 while cur: arr.append(cur.val) cur = cur.next length += 1 left = 0 right = length - 1 last = head while left < right: arr[left].next = arr[right] left += 1 if left == right: last = arr[right] break arr[right].next = arr[left] right -= 1 last = arr[left] if last: last.next = None ###Output _____no_output_____
examples/colab/Training/multi_class_text_classification/NLU_training_multi_class_text_classifier_demo_amazon.ipynb
###Markdown ![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png)[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/colab/Training/multi_class_text_classification/NLU_training_multi_class_text_classifier_demo_amazon.ipynb) Training a Deep Learning Classifier with NLU ClassifierDL (Multi-class Text Classification) 3 class Amazon Phone review classifier training]With the [ClassifierDL model](https://nlp.johnsnowlabs.com/docs/en/annotatorsclassifierdl-multi-class-text-classification) from Spark NLP you can achieve State Of the Art results on any multi class text classification problem This notebook showcases the following features : - How to train the deep learning classifier- How to store a pipeline to disk- How to load the pipeline from disk (Enables NLU offline mode) 1. Install Java 8 and NLU ###Code import os from sklearn.metrics import classification_report ! apt-get update -qq > /dev/null # Install java ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"] ! pip install pyspark==2.4.7 ! pip install nlu > /dev/null import nlu ###Output _____no_output_____ ###Markdown 2. Download Amazon Unlocked mobile phones dataset https://www.kaggle.com/PromptCloudHQ/amazon-reviews-unlocked-mobile-phonesdataset with unlocked mobile phone reviews in 5 review classes ###Code ! wget http://ckl-it.de/wp-content/uploads/2021/01/Amazon_Unlocked_Mobile.csv import pandas as pd test_path = '/content/Amazon_Unlocked_Mobile.csv' train_df = pd.read_csv(test_path,sep=",") cols = ["y","text"] train_df = train_df[cols] train_df ###Output _____no_output_____ ###Markdown 3. 
Train Deep Learning Classifier using nlu.load('train.classifier')You dataset label column should be named 'y' and the feature column with text data should be named 'text' ###Code # load a trainable pipeline by specifying the train. prefix and fit it on a datset with label and text columns # Since there are no trainable_pipe = nlu.load('train.classifier') fitted_pipe = trainable_pipe.fit(train_df.iloc[:50] ) # predict with the trainable pipeline on dataset and get predictions preds = fitted_pipe.predict(train_df.iloc[:50] ) preds ###Output tfhub_use download started this may take some time. Approximate size to download 923.7 MB [OK!] ###Markdown Test the fitted pipe on new example ###Code fitted_pipe.predict("It worked perfectly .") ###Output _____no_output_____ ###Markdown Configure pipe training parameters ###Code trainable_pipe.print_info() ###Output The following parameters are configurable for this NLU pipeline (You can copy paste the examples) : >>> pipe['classifier_dl'] has settable params: pipe['classifier_dl'].setMaxEpochs(3) | Info: Maximum number of epochs to train | Currently set to : 3 pipe['classifier_dl'].setLr(0.005) | Info: Learning Rate | Currently set to : 0.005 pipe['classifier_dl'].setBatchSize(64) | Info: Batch size | Currently set to : 64 pipe['classifier_dl'].setDropout(0.5) | Info: Dropout coefficient | Currently set to : 0.5 pipe['classifier_dl'].setEnableOutputLogs(True) | Info: Whether to use stdout in addition to Spark logs. | Currently set to : True >>> pipe['default_tokenizer'] has settable params: pipe['default_tokenizer'].setTargetPattern('\S+') | Info: pattern to grab from text as token candidates. 
Defaults \S+ | Currently set to : \S+ pipe['default_tokenizer'].setContextChars(['.', ',', ';', ':', '!', '?', '*', '-', '(', ')', '"', "'"]) | Info: character list used to separate from token boundaries | Currently set to : ['.', ',', ';', ':', '!', '?', '*', '-', '(', ')', '"', "'"] pipe['default_tokenizer'].setCaseSensitiveExceptions(True) | Info: Whether to care for case sensitiveness in exceptions | Currently set to : True pipe['default_tokenizer'].setMinLength(0) | Info: Set the minimum allowed legth for each token | Currently set to : 0 pipe['default_tokenizer'].setMaxLength(99999) | Info: Set the maximum allowed legth for each token | Currently set to : 99999 >>> pipe['default_name'] has settable params: pipe['default_name'].setDimension(512) | Info: Number of embedding dimensions | Currently set to : 512 pipe['default_name'].setStorageRef('tfhub_use') | Info: unique reference name for identification | Currently set to : tfhub_use >>> pipe['sentence_detector'] has settable params: pipe['sentence_detector'].setUseAbbreviations(True) | Info: whether to apply abbreviations at sentence detection | Currently set to : True pipe['sentence_detector'].setDetectLists(True) | Info: whether detect lists during sentence detection | Currently set to : True pipe['sentence_detector'].setUseCustomBoundsOnly(False) | Info: Only utilize custom bounds in sentence detection | Currently set to : False pipe['sentence_detector'].setCustomBounds([]) | Info: characters used to explicitly mark sentence bounds | Currently set to : [] pipe['sentence_detector'].setExplodeSentences(False) | Info: whether to explode each sentence into a different row, for better parallelization. Defaults to false. | Currently set to : False pipe['sentence_detector'].setMinLength(0) | Info: Set the minimum allowed length for each sentence. 
| Currently set to : 0 pipe['sentence_detector'].setMaxLength(99999) | Info: Set the maximum allowed length for each sentence | Currently set to : 99999 >>> pipe['document_assembler'] has settable params: pipe['document_assembler'].setCleanupMode('shrink') | Info: possible values: disabled, inplace, inplace_full, shrink, shrink_full, each, each_full, delete_full | Currently set to : shrink ###Markdown Retrain with new parameters ###Code # Train longer! trainable_pipe['classifier_dl'].setMaxEpochs(5) fitted_pipe = trainable_pipe.fit(train_df.iloc[:100]) # predict with the trainable pipeline on dataset and get predictions preds = fitted_pipe.predict(train_df.iloc[:100],output_level='document') #sentence detector that is part of the pipe generates sone NaNs. lets drop them first preds.dropna(inplace=True) print(classification_report(preds['y'], preds['category'])) preds ###Output precision recall f1-score support average 0.00 0.00 0.00 29 good 0.65 0.94 0.77 32 poor 0.69 0.95 0.80 39 accuracy 0.67 100 macro avg 0.45 0.63 0.52 100 weighted avg 0.48 0.67 0.56 100 ###Markdown Try training with different Embeddings ###Code # We can use nlu.print_components(action='embed_sentence') to see every possibler sentence embedding we could use. Lets use bert! 
nlu.print_components(action='embed_sentence') from sklearn.metrics import classification_report trainable_pipe = nlu.load('en.embed_sentence.small_bert_L12_768 train.classifier') # We need to train longer and user smaller LR for NON-USE based sentence embeddings usually # We could tune the hyperparameters further with hyperparameter tuning methods like gridsearch # Also longer training gives more accuracy trainable_pipe['classifier_dl'].setMaxEpochs(90) trainable_pipe['classifier_dl'].setLr(0.0005) fitted_pipe = trainable_pipe.fit(train_df) # predict with the trainable pipeline on dataset and get predictions preds = fitted_pipe.predict(train_df,output_level='document') #sentence detector that is part of the pipe generates sone NaNs. lets drop them first preds.dropna(inplace=True) print(classification_report(preds['y'], preds['category'])) #preds ###Output sent_small_bert_L12_768 download started this may take some time. Approximate size to download 392.9 MB [OK!] precision recall f1-score support average 0.72 0.67 0.69 500 good 0.85 0.87 0.86 500 poor 0.78 0.83 0.80 500 accuracy 0.79 1500 macro avg 0.78 0.79 0.79 1500 weighted avg 0.78 0.79 0.79 1500 ###Markdown 5. Lets save the model ###Code stored_model_path = './models/classifier_dl_trained' fitted_pipe.save(stored_model_path) ###Output Stored model in ./models/classifier_dl_trained ###Markdown 6. Lets load the model from HDD.This makes Offlien NLU usage possible! You need to call nlu.load(path=path_to_the_pipe) to load a model/pipeline from disk. 
###Code hdd_pipe = nlu.load(path=stored_model_path) preds = hdd_pipe.predict('It worked perfectly.') preds hdd_pipe.print_info() ###Output The following parameters are configurable for this NLU pipeline (You can copy paste the examples) : >>> pipe['document_assembler'] has settable params: pipe['document_assembler'].setCleanupMode('shrink') | Info: possible values: disabled, inplace, inplace_full, shrink, shrink_full, each, each_full, delete_full | Currently set to : shrink >>> pipe['sentence_detector'] has settable params: pipe['sentence_detector'].setCustomBounds([]) | Info: characters used to explicitly mark sentence bounds | Currently set to : [] pipe['sentence_detector'].setDetectLists(True) | Info: whether detect lists during sentence detection | Currently set to : True pipe['sentence_detector'].setExplodeSentences(False) | Info: whether to explode each sentence into a different row, for better parallelization. Defaults to false. | Currently set to : False pipe['sentence_detector'].setMaxLength(99999) | Info: Set the maximum allowed length for each sentence | Currently set to : 99999 pipe['sentence_detector'].setMinLength(0) | Info: Set the minimum allowed length for each sentence. | Currently set to : 0 pipe['sentence_detector'].setUseAbbreviations(True) | Info: whether to apply abbreviations at sentence detection | Currently set to : True pipe['sentence_detector'].setUseCustomBoundsOnly(False) | Info: Only utilize custom bounds in sentence detection | Currently set to : False >>> pipe['regex_tokenizer'] has settable params: pipe['regex_tokenizer'].setCaseSensitiveExceptions(True) | Info: Whether to care for case sensitiveness in exceptions | Currently set to : True pipe['regex_tokenizer'].setTargetPattern('\S+') | Info: pattern to grab from text as token candidates. 
Defaults \S+ | Currently set to : \S+ pipe['regex_tokenizer'].setMaxLength(99999) | Info: Set the maximum allowed length for each token | Currently set to : 99999 pipe['regex_tokenizer'].setMinLength(0) | Info: Set the minimum allowed length for each token | Currently set to : 0 >>> pipe['glove'] has settable params: pipe['glove'].setBatchSize(32) | Info: Batch size. Large values allows faster processing but requires more memory. | Currently set to : 32 pipe['glove'].setCaseSensitive(False) | Info: whether to ignore case in tokens for embeddings matching | Currently set to : False pipe['glove'].setDimension(768) | Info: Number of embedding dimensions | Currently set to : 768 pipe['glove'].setMaxSentenceLength(128) | Info: Max sentence length to process | Currently set to : 128 pipe['glove'].setIsLong(False) | Info: Use Long type instead of Int type for inputs buffer - Some Bert models require Long instead of Int. | Currently set to : False pipe['glove'].setStorageRef('sent_small_bert_L12_768') | Info: unique reference name for identification | Currently set to : sent_small_bert_L12_768 >>> pipe['classifier_dl'] has settable params: pipe['classifier_dl'].setClasses(['average', 'poor', 'good']) | Info: get the tags used to trained this NerDLModel | Currently set to : ['average', 'poor', 'good'] pipe['classifier_dl'].setStorageRef('sent_small_bert_L12_768') | Info: unique reference name for identification | Currently set to : sent_small_bert_L12_768 ###Markdown ![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png)[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/colab/Training/multi_class_text_classification/NLU_training_multi_class_text_classifier_demo_amazon.ipynb) Training a Deep Learning Classifier with NLU ClassifierDL (Multi-class Text Classification) 3 class Amazon Phone review classifier training]With the [ClassifierDL 
model](https://nlp.johnsnowlabs.com/docs/en/annotatorsclassifierdl-multi-class-text-classification) from Spark NLP you can achieve State Of the Art results on any multi class text classification problem This notebook showcases the following features : - How to train the deep learning classifier- How to store a pipeline to disk- How to load the pipeline from disk (Enables NLU offline mode)You can achieve these results or even better on this dataset with training data:![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAtIAAAD2CAYAAAD7yGCjAAAgAElEQVR4Ae2d+68VxZq/v/+KP5gYww9kYgwxBggRMoRLIBIhnEAUEy6RyxgCCig6ymGAhJsDw1EHhghKkMBRFFRGwMtBDQiEABJAQS7hLhwQBYlY3zzlvItatarXWt1rbdh7r08l0L26q6urn6q36lNvVff+f05BBERABERABERABERABEQgN4H/l/sKXSACIiACIiACIiACIiACIuAkpFUJRKCTEbh06ZJ78sknXZ8+fdyRI0ea+nS//vqre/75593DDz/sduzY0dS0lZgIiIAIiIAIdDQCEtIdrcSUXxGoQeCbb75x9913n//37rvv1oid7/TRo0fdo48+6tOeO3duvosVu0MSCOuT1SuOpQIDrUmTJpXqH/H5zfF7GW7cuOGWL1/u+vbtW5a3rOe4l3nVvUVABDoWAQnpjlVeyq0I1CQgj3RNRIqQg0BHF9K///67++tf/1omoLMGBLdv33bfffedGzdunLv//vtdsweiObArqgi0CYGvvvrKPf74427nzp1tkn4rJioh3YqlrmcWAREQgQIETFTX48k17/S99kjbLMpTTz3lTp065f7444+KJ0dAf/3112748OFlgltCugKVDnRwAtRpBpL12HAHf9S7ln0J6buGWjcSAREQgY5NoCMKacvz5s2bM+GfOHHC9erVy3uhZ86c6V577TUvNiSkM5HpRAclICHd/IKTkG4+U6XYgQlcvnzZDR482C1dutSdPn3aTZs2zXXt2tU98MADbuLEif5Y+Hjm7aJx+uWXX9yyZcv8i3hMCzM9TBpxoNO2dPEMDBgwwG3fvt3hFUuFixcvugULFrju3bv7zp0X/bgP9wuDNZA2bc1aZvKXFVg3um7dOn9/ruE5ecbUC4omRixtttU8Gr/99pvbuHFjKW34jRo1yu3bt6/MI2heS9Zb85yvvvqqzwf8Ro4cmcxL1vO09+M8I3Xrp59+8hx4Pp7zkUcecevXr3csQQhDzJDygQ+cUoH688UXX3hu8DaGMXOupZ6//vrrpTXDxCc/XJ9VD7nO6kG1sre8Wdnea490PXmGNeLZWJktsW1WwK5Cu2e99jvvvOOww1So1+7xsO/fv9+3N5Qjtkna2DbPFQZr36iLnCMOAwiuwRtPOnGwfNDuEI/4qbTj6+7177Nnz7p58+b59ph8034uWbLEXb16tSxrIZOyE875pT1xO0qd6N27tzt27FhZGweX2I4t7Xr7E+5Pu856fisXa5fpN+JQb5tiNgCHrH+0TeRXIT8BCen8zHRFJyZgDR9ryKwhCxsexMaVK1dKBExIL1y40AuRMC77cfzdu3eXXtaL4y5evLhCTNH5pvLBtXEnb52/pRt3AKVMO+c70RkzZiQb1ZTwSTXEWWKKjmDy5MnJtBF3dMI2vW5iq2fPnu6xxx6ruGbo0KHuwoULYdY77D6d3sCBA/0gyASPlR
VcNm3aVHo2uLzyyisVPIhPffj+++9LcdlBhFMHLb1wG3eQxjyMY/vkY9u2bWVphz+sHmSVfRjX7pOqT2G8ttiHtT1T1rZavsyWYhsrmtc9e/b4AWIqLymW9do9doQ9UW6ptLHDcMBt7dv06dPdc889V3ENX/oJBduPP/7oBg0aVBGPe02ZMuWev0SaVR48A8+SYhKXqTGhzsSBuHE7yrGHHnrI/eUvf6lIP7ZjS7ve/uT8+fNu2LBhFenyHAjq+EtJ9bYpZrcpHnYsbidiFvqdTUBCOpuNzrQgAWv4aFzoQBC+iBTE87PPPusbuFBomJAmPmKQc8T/+eefffwuXbq4vXv3epKkgbBGCH355Zc+Hh0hjf4zzzzjunXr5g4dOlSibvFpnBFJJijpGD/++GP30UcfleLGO6kOIIyDN6VHjx4OMW0dLfk+ePCg91QigrKCNcopAcA13BseeLfPnDnjRfOtW7ccU+t0BnjLWKtKMLFFfDxePJfxnjBhghcIu3btyspKhzpu4o7yZPnAuXPnPJvPP//cc8FbCScCopp4eKCt3PFcvv/++z7urFmzPCcDYPGps7xMBEM8ywghvHDh4A/m5GXLli0l7xzxqU+UT5gPS9+2tcre4rG1sq0mWMP4zdw31tSrrH/V8mV1mG0zwvz58734oi6bx58yWb16dal9sPvksXtrf2hTeHnMyh0hPmTIEF+HsCkLYfuGvfF81Cu80+QRVrYEhmO0D9QJ6p15zqmP1Evq52effWZJt6vthg0b/LNgFzAhZLWbxoQ6Ewf4pIQ0nHh+ZjEoL8qUWUVYjR492l2/ft0nZWkTv1Z/Qj6tDGbPnl2yWfK9cuVKfz8+a3rt2rVSNq2e19Om2EVWt7Pab4unbf0EJKTrZ6WYLUDAGj5eTOLrF2E4cOCA7wxp7CxYR0Z8hFEYrDG3BgtBjbBmyUMcLG2uscB1NJBMT1pnYOdqbVMdQHgNecU7SuNu09nh+Wr71cQUHQgdSf/+/b2IDtNh0ECHQKdigxETW3RW8VvkdOjENX5hWh1xn06P8qeMTUzxHHTEiB4TdohpxCxT7ZwLA/Xg5Zdf9vHtXMjw8OHDYfRc+1b3LR+pi6uVfRzf8lUtvfiatvidJ892fxMbbLOCxaGOhv9SgmzVqlUl4WUD16x0yW+9dr9ixQof1+wpTJNlGgzOqS/WflgZY59xXUHkc1975pMnT/olDAzEbAbJ0mfZRL9+/bzws2PtaYvA51leeOGFzKVQll9jkio3WKSENHb83nvvlXEx28RBgaOCYGnX058Y0zFjxpSJZdIhbWao8ITTV1iot02x+Gyt3naWdjV8tnu1LyF9r8jrvu2SgDV8qUYVYY34DM+ZkA6PZT2YCeuw0433Q5FOg0dnUMQjm+oA4nwxTRiue1y0aJH3jsedZnxdNWFi/LK8mnYt+SNUE1sWt7M0+NSRuFOO2fLbhHVcN8Lfqc6aafybN2+mkqw4ZutHbd19mHY14ZunTKqVbUWG2vBAnjxbNkxsWD214+HW4oTs2E+1BYhnW0rBsh4Gm4hf8/LG6dZr99yL9bqI3jiYLYblmToWX2e/TVjHzxf+zlPnLN27sUV4slQOjvxjUMr65dCba/kwJqlyo4xjm00ds7TWrFlTFr9a2nF/Yn0J66lTwepb2B7W26aE6aXSCc9rPz8BCen8zHRFJyZQreEzj0HY4FrjFx7LwmMNWNgRxfuxkA6XhmSlmzperbEP49t0Jx27rdulw6/mNasmTIxf6AUL72fXkj9CNbFlccOOI0yro+3X2+kZw7huhL9TQjpr8BJzYn111rp77hEKr/jaPGVSrWzjdNvyd548Wz7MVq2e2vFGtgxQaS8YsBp/BrLxulfuWa/dU6fwDNM2xcHqUVieqWPxdfbbuIX1Lt5vr0LanoGlZby4xwvd5J02DkEdOguMSaoNpyzyCGlmCML41dKO+xPrS0gjFaxOhu1hvW1KmF4qnfC89vMTkJDOz0
xXdGIC1Ro+my4MGzpr/FKNcIzJOqbUNGwcl9/Eo/FnOUTY8KfixsdSHUAcJ/6NeGZtHve0dZJxHH7bc4QNusXjjfgRI0Z4z3281IVnsLV+dm01sVXtPna/jrStt9MzJuFay2rPWW05Teo66i9lHK7DJJ7V/VB4xdfnKRN7jmrpxem3xe88ebb7m9hg2xYBW+D9CwQ19hJ+SSKP3eO9xOOaWqtsSztYGmbtRz1lbM9rbVvY3tm5jrg9fvy4XxIVe/CNSTwo4DjveYTCmOfOaltpP8eOHVs2sLG0U/1D3J/YUhrSiB0ZtrQjzku9bUpYXla3rQ0Oz2m/GAEJ6WLcdFUnJWANHy/ZMMVOB0Qj9o9//MO/BR43ZNbZpBrKGJGtS+Ztcj4zhtCoFuzNc15g4aUkWxPLGlo620ZeNqQTJ8+s5eP5CGy3bt3qvWHVBEQ1YQIv1lQi1OiY7GVDprDXrl3rX8YJ1/5WE1vV7lONW3s9l6fTszW1/EU+PFfhmur4+WDO5xBhzicGeWGUsuQ4/DlndYdr7YUm6hTxSJuXEufMmeM9dtWEb54yqVa28TO05e88ebZ8mNioZgcWt9YWDjAPX+7kGj6DyAu18dcS8tg9doz3mjaFpRhWnnwlhPcfOEccC9a+VStji2vCEK8573WEYt/itNftm2++6b9mAmPsgMDzUMfjNtwG/xz/9ttvvT0gurElbCqOT51grTL1yl4O5gVMbJVBDWVNORCMdz39CS93Tp061adBW2E2y3IUXmokbc4Tz0KeNsWusXdPGCTEzg6Lo20+AhLS+XgpdicnYA0fDWj8j4Ys/HQbKPIIaeL//e9/Ly2hiNOPG2w6AKYhbclFHD/s5Kvl266j4bdg4sLOhVu8ZOHn1UwQhXHi/TAv1T7hFE9lW9qpjt3yGObb8t8Rt3k6Pfsz7zFn+01aYYA5X4Sx8+E2FmosJUBghXHC/bAsrHzC8/F+WPZWZnEc+x3nJXyGttq3PFWrR/U8Z8y83vzWSjv+7GUeu0ewZX32kPYqTtvaibCMqz0HzOw9CivDcFuNabV02/ocZRXmM9yPP9sHbxuIhvFod/EOx+0y9T2MF+7z6Tps0YLxDuPYfqo/qfbZQwZLnA9DnjbFrrOBmuXDtvfCNi1PHX0rId3RS1D5byqBVMOHR3j8+PH+jxWYd8NumldIcz1TrqRHutaIsY0bbO6Rio/QZd1fOP2XyneYNvthp4dX49NPP/Xiy4Q6L57xh1/iP/hRSwiQdiimyDf5Cf+IDM/KGl4a8TBY2qmOnfzG+Q6v7Wj7eTs9ypdy5nOBcVmmRF0cn3LljwJR38J6iwea2QxbN2plQ7znn3++bI20lU98//B3WPZWZuH5cP9edNaWp7D+x3WnnudMMY/TyfrN5x5ZYmEvd1I2DHyy/gBOvXbP/RDTeIz5VjHijH/M+qTStnYiZW9Zecdm+e50SlBXY5qV3t04znOGf3AIJvCBU+jRtbxgO6xdxxaIS9nwNSOeL26XU0IaG43bZNI23qENcI+s/oRr+CNeIW+4p9pl4uZtU+x5mbWK+6B7YZuWn46+lZDu6CWo/DeVgDV8jXSaTc2QEhMBERABEWg3BBDSsbjOypz6kywyneu4hHTnKk89TYME1PA1CFCXi4AIiEAnJiAh3YkLt+CjSUgXBKfLOicBCenOWa56KhEQARFoBgEJ6WZQ7FxpSEh3rvLU0zRIQEK6QYC6XAREQAQ6MQEJ6U5cuAUfTUK6IDhdJgIiIAIiIAIiIAIi0NoEJKRbu/z19CIgAiIgAiIgAiIgAgUJSEgXBKfLREAEREAEREAEREAEWpuAhHRrl7+eXgREQAREQAREQAREoCABCemC4HSZCIiACIiACIiACIhAaxOQkG7t8tfTi4AIiIAIiIAIiIAIFCQgIV0QnC4TAREQAREQAREQARFobQIS0q1d/np6ERABERABERABERCBggQkpAuC02UiIAIiIAIiIAIiIAKtTUBCur
XLX08vAiIgAiIgAiIgAiJQkICEdEFwukwEREAEREAEREAERKC1CUhIt3b56+lFQAREQAREQAREQAQKEpCQLghOl4mACIiACIiACIiACLQ2AQnp1i5/Pb0IiIAIiIAIiIAIiEBBAhLSBcHpMhEQAREQAREQAREQgdYmICHd2uWvpxcBERABERABERABEShIQEK6IDhdJgIiIAIiIAIiIAIi0NoEJKRbu/z19CIgAiIgAiIgAiIgAgUJSEgXBKfLREAEREAEREAEREAEWpuAhHRrl7+eXgREQAREQAREQAREoCABCemC4HSZCIiACIiACIiACIhAaxOQkG7t8tfTi4AIiIAIiIAIiIAIFCQgIV0QnC4TAREQAREQAREQARFobQIS0q1d/np6ERABERABERABERCBggQkpAuC02UiIAIiIAIiIAIiIAKtTUBCurXLX08vAiIgAiIgAiIgAiJQkICEdEFwukwEREAEREAEREAERKC1CUhIt3b56+lFQAREQAREQAREQAQKEpCQLghOl4mACIiACIiACIiACLQ2AQnp1i5/Pb0IiIAIiIAIiIAIiEBBAhLSBcHpMhEQAREQAREQAREQgdYmICHd2uWvpxcBERABERABERABEShIQEK6IDhdJgIiIAIiIAIiIAIi0NoEJKRbu/z19CIgAiIgAiIgAiIgAgUJSEgXBKfLREAEREAEREAEREAEWpuAhHRrl7+evoUJ/Pbbb27jxo1uwIAB7r777nNdu3Z106ZNc6dPny5M5ddff3WTJk3y6ZFm1r9vvvmmdI9ffvnFrVu3rpSPBx54wI0cOdJ98cUX7vbt26V42qlO4MSJE778KEe4U66U7++//179wjrPWjk98cQTjjLiHpQ1ZR6HP/74wx0+fNhNnz7dde/evVQPwnKPr9HvOwTawjbvpO5cveVDvP3797tx48b5Mr///vvd448/7rZv3y7bDIE2aZ/2bufOnSXe2Fjfvn3d8uXLHfYXB+LTTg4fPtxRNtjlqFGj3L59+3wZx/FJg7R69erlbfLhhx92CxYscBcvXoyj6ncOAhLSOWApqgh0FgJ01HPmzCkJnFDw0sgeOXKk0KMWEdLvvvtuMh90DOvXry+Uj1a76MCBA+7RRx9Ncly4cGHDYpqOuU+fPhXpp4T0jRs33Lx583zHHtYr9iWka9fMtrJNu3Oe8vnwww/9ADsuR2yTwS9CW6F5BI4ePZppx1OmTCkbtDJARhRTFnH5MJjesWNHWcZ+/PFHN2jQoIq4XIsQv3TpUll8/aifgIR0/awUUwQ6DYFt27b5BpgG9IcffvAd4rVr19zs2bN9QztjxgxHh97MYCJ76NCh7sKFC6Wk6azffvttd+XKFX/s1q1bbvPmzb4DTwm10oXa8QQotzFjxnhv1OrVqx1CCU/Vrl27vOeJTnXPnj2FaeHpRkTjvfrb3/7mzp49m+mNRFgtW7bM1yE8Y19//XVZ5184Ey10YVvaZp7yOXfunBs4cKAve+oSwo16xSB7yJAhvm5RNxSaR+DYsWN+EIrohXXIm4EyQtvCoUOHXLdu3bwIpkyISxnh0cYZQttubSrX4I1+5ZVX3Mcff+zbCOoCbT9liZimzVUoRkBCuhi3hq5CoHz66ad++tqmSFPTN7t373ZdunRxs2bNSnqUmLbFANiGITXFm5qKs9EvHkGMjA6QzpIRLlN58RQ/hoqRjh8/vuSlYNo2a2qI58RrwbPFI2Z+xyKJ6SXSIg+cpzHg+mYLupBVK+6boKVhZvo9DDS8NMC9e/d2J0+eDE81vG8CgfpWKyAOn3rqKTd58mR38+bNWtFb+jxeXmwWL3C8jMPaiKVLlxZiRGe7ZMkSb++xhyuVIG0Pdku5paaiU9fo2B0CbW2becqH8k71L+TW+qYNGzbcybz22ozAm2++6Xr06OEQ2hbWrFnj9QFlEQfsnjaBAVCtQLtMOc+dO7dWVJ3PICAhnQGmLQ9nTWVTmRkxWmdIRzR27F
jvFcA7EAbE5dSpUyvOYVRZU7yLFy8upU1aJqSZ+mVNKvcP/3EsHNHSYYfnw308YogfCzwD6YZx4v1QSFebdoqntOwe2hYjgEexX79+fj0t3l8LlB+Cy9bYNnMa3rymsZfE7h1uqfcrV650Dz30UMX0ZBhP+38SWLFihe9Q9+7dW0LCoBcvNLyxu9DWSpHq2Ll69aobMWJERV3JuhSvFoP/MC9ZcXW8kkBb22ae8iEudQehFQfLp8RXTKa5v0MPc7xEi7aaNpJlXXHgGOfqcVqYkEaYKxQjICFdjFtDVzFaDKdIwymW2BNoHqW4MTMRjLeI6wmIXsQvHqEvv/zSi2bO4YV45pln/DQQ00EWLA0ay549e/oGE8P9+eef3bPPPlvRITK6ZV0to2IT+wj8iRMnVsS1aSemd48fP+7zaALpwQcfLFv7yqCApQQIuPfff99PO5FHpv9fffVVP7L+7LPPLNvaNkjAyt28lJQLSwJsJsAGPPU0wvVmZdOmTb4cqc+pYHmyez/22GPem2J1O3WNjv1JADFj7Qa8mGVgYGss2Q4ePNhdvnw5NzJsHU8YHrH4hVBmrQ4ePFiW5vz58/1LjrQ/vLhqgzLaJNZzsuxEIZuA2UFb2Wae8mEwxKCIqf9w6QBljnMDj6eEdHZZFj1jsxJmv/SX2E48M2sDHfrfM2fO+D4WxwjLqeh3ub5WG04/jvOOJSKhNiia91a9TkK6HZU8jVK8Dsqm4uiUQu8hBhKPRq3hS4kVG6GGU3HWaDOFHnu8iYch1uOVJC9xXPNex3lBHPM1gbABZgkBQiAcFFixmOeDDkChOQSs3BHPlA+DKMqPZUZ04Fu2bKmrEa43N7ZcJJ7hCK+3PJEP+4ewZwmUxHRIqnIfW0Io44E2gQNDvq7B9PyECRMKC+lUuVj5sI3XX5OX8Hy83xZr7yuJdNwjxrutbDNP+ZiDIy7D8HfYjndc6u0r57GQhrct3QoHouY4C8sj3q8mpGlXGRwj1NmqnS1eDySki7MrfCXGQMXlM0IYSFj5YyHNiJE10uZx4qa25INlH+xbMPEbphfvh4LUGu16G0PysnXr1rK13WH6oeg2UT969Gh36tSpkkeaddg8M9PRFvB0xxzCdNnXWlmj1fjWyt0YI6BZX2ufQLJBUDjoauSuNOaUL17pegIdCTMwCHx5SmoTi8URoto+HWidMl7FcJlW7VT/jGF1hbJ47733HEs9CDa7RLmG73CQF44h6L///ns/cxW+MKXyrE7eeLeVbeYtH/qq8HNpDJxw6nzwwQfe6RP2J9WfTGeLEKDPxY5shmnVqlVlyTDLFL5XhPOBthx9gR1mteGIZhwmlGe8ZKTsBvpRFwEJ6bowNS8SHRudjDWU8TYW0tzZXuww764tm7DfljvzDMdphr/Dhs8a7XqENAbNGuswrXg/FNLE/4//+I9k/GHDhrnz589btr3XO04r/i0hXcLV8I7NANDQvvDCC36gEybKWjnO1fOiSnhdap8ZCL7SUc0bnbqOY7Z2r5pXJevaVjrOLAL2gtj95JNPyqaA+aQVX14oaj/WRqTWT16/ft0xUA7XX9OWcL/Up7SsPMN2opXKqZ5nbWvbbFb5WJ/E8gKFtidgM7OhrVW7K9og610FBrYIbc4vWrSorL2olqbOZROQkM5m0yZnbIlFuO7MbkQjlxLS5oHm5UKm2/Dm0lnFyzHMkxivp7b04611kvUIaTNk1jry5Q6EsgUT8GEHySiauLzsZF/tYLSM98o8n3a95SP0Uts5bZtPwOpTqq7ZS4HhDEjRHOD14KVBRHm9dTK8l301IMurEsZt5X3eH4Axg+TQLmECd87Zmtu8nEyIv/jiixVp25KdsHNHcLPkjO9Ox4HOHcEfthNxnFb/3da22YzyIY8MzGjXmW1UaHsCZof1DIhxUuGsevLJJ8s+AEAu0Q+IZ5ZzvPHGGxU23fZP0jnvICF9l8vVljGwbtGmWtkyQsSjlBI3ZJFOiHOsg8
QLlFpPjLC2734ytYv3u1owAVuPkLaXjhgA2GfRaFCZHmIqOe4gEdePPPKI96bzfHEHH+bLOg+ENs9p08dhHO03l4ANfsKXQRHR9h3plCgjBzTSeJcpK6Z3q62r4wWY/v37+2lJ0q43UG/xeLHGl+8X856AQjYB8/ozTbt27dqK70hntSmUHX/whqU9vIyc8iJjtwx+w7TJCTZtdSWcbrbZsrBekYZ92zblAMh+stY8U9Q2eSEQe6EfyZpNKlo+1BXKnEEbdsngjD6rmv23Zuk196mxHQYrM2fO9OI3a3kcXmYcVPSflD/2Gn+uko8IvPTSS97eKTuuUWgOAQnp5nCsOxXr9OKlC/Y7q9MzkcxUPN7CrDds//73v3tDsfTCbZx2HiFtay3D9OL90NPER99pbOM4/E796WKuRZyl4nMsTLtu2IqYScC8Sine8dKbMBHr5Lku9ESGcding7X18NW80dXqFfUh7gzi++j3nwTgROcZl2c1wcP6ShsEV7MxBBqzS3Ha/MZDRl2yQMef9dnLVOdu12l7h0BR28QhYmWU5RzJWz5hmpY2dUrezDvl1cy9sH013mxhHn++NqvtxM4Q1PEgJyvt8D7qZ4uVpoR0MW4NXcWyBz4dhScIA2H5A18nYAo7Frt2I4wCLzSV3pZ42LlwS7z9+/eX/dEUM5Q47TxCmnsg5hHy1mEjiBnZ0omz3io0Qka/vAzJM/LNYrvG8sI29GSRPp7H6dOnJwV1mHb4vNovToAOO3yRCOHKiyvVPpOGR/rpp5+u6ZG2r83E3xePc5vqDJgy/s///M+KpUvxtfp9h4DZfdyuYJtxh2pXcZw2B9vM8khbXMoz/JwdZfTOO+8kP2fH9DEdOe0Ddk76fKILQa5QH4EitglfmFfzSHP3POVjQpp+irRZFkBdyKpT9T2dYmURiMUubTL9PUulYg9y2HbSzzJT8D//8z9lfzU2vE+cdtgX27762ZBY/fsS0vWzUsw6CeD1YPSMcI+/M0tjgIcS4V3No1nnrRRNBERABERABERABO4ZAQnpe4a+897YpowR0oxw7fvXeDF++ukn79VgBMxneuTZ6Lz1QE8mAiIgAiIgAp2dgIR0Zy/he/B8TB3yhxdsuii1HTRokOPPgiuIgAiIgAiIgAiIQEclICHdUUuuneebNX6sn2bdFuu3ENOss+OP0Lz11ltlLyi180dR9kRABERABERABEQgSUBCOolFB0VABERABERABERABESgOgEJ6ep8dFYEREAEREAEREAEREAEkgQkpJNYdFAEREAEREAEREAEREAEqhOQkK7OR2dFQAREQAREQAREQAREIElAQjqJRQdFQAREQAREQAREQAREoDoBCenqfHRWBERABERABERABERABJIEJKSTWHRQBERABERABERABERABKoTkJCuzkdnRUAEREAEREAEREAERCBJQEI6iUUH2wuBd9991/8xF/7UuIIIiIAIiNhqJ/IAACAASURBVIAIiIAItCcCEtLtqTSUlwoCEtIVSJp2gD/lvnHjRjdgwAA/WOnataubNm2aO336dMP34C9bLl++3PXt27f0p+LZ59iNGzcy0//999/dvHnz/DWPPvqoO3r0aGZcnSgncOLECV9+lCN/SZRypXxh2kj4448/3P79+9348eOdpc2W3wcPHiwl/euvv7pJkyaVyps8pP5pUFxClrnT3mwz9ZdqKWvKXKHtCHz++eeuS5cu3o7oC1Mhj90XbZdT99WxOwQkpO+w0F47JCAh3TaFQkc9Z86cpNDp1auXO3LkSOEbX7lyxT399NPJtBFWM2bMcNw/FTZt2uQefPBB/09COkUofezAgQMOXinhunDhwobE9EcffeQeeOCBZNoI6j179vhMSUinyybv0fZmm/v27XN9+vSpKH8J6bwlmy8+Ahnu1YR0HrtvpF3Ol/PWiy0h3Xpl3qGeWEK6bYpr27Zt7v7773fDhw93P/zwg8PreO3aNTd79mzfYV
YTu7VytGPHDp8GadF4E27fvu3F+ZAhQ1zv3r3dyZMnK5JBvCPiuffLL7/shaE80hWYKg5QbmPGjPFid/Xq1d7jD+9du3Z5nqHYrbi4xgE8WBMmTPDp7Ny5syTIOb5y5Upfh5YuXVojFec9lwivoUOHugsXLtSM38oR2pNtmph7+OGH3d/+9jd39uxZb8utXD5349mxr8mTJ3sh/c477/j2NPZI57X7ou3y3Xjejn4PCemOXoIN5h+DXbZsmevevXuFxwHv1ty5c8vuEE8jcR3Xk04c4ulJvFqjRo1yeDgQbnG4ePGiW7BgQSkvxO/WrZvPl6aDY1rFf5vnEA/m4cOHyxJC+CKus8RuWeSMHwgB6s6WLVvKYrDE4MUXX3T9+vXzHXJ40jqOYcOGufPnz/t6J490SCh7H9tgUMSSmHgZB0s7KIt6xG7qDtevX3ejR4/2MwxXr14ti4IdP/TQQ27FihVlx1M/TBzGYiAVt5WPtSfbpI1esmSJX86DCFO4OwTgvm7dOm9bcMe+seHYdvLafZF2+e48cce/i4R0xy/Dwk9g4gUjzfoXCmmM2tZIxvFNAFlmqqVNp09DEYppvJGp6UO7j4S0kW18i1cJMct66Fu3bpUSxMOB4LIyLsr80qVLXowzEFq8eLH3QJ47d84vJWHZRlz21AO8m9zX7km9k5AuFU3VHYQs07979+4txcMjzZILBkXYUCPT8OvXr/dC/YknnnBfffWVu3nzpmPtJnUotvtSBoId85yRF5uhCE5rNyDQnmyTgdOIESMq2okgu9ptAwK8j4ADibaTgTFtIjYcC+m8dp+3XW6DR+u0SUpId9qirf1gn332me8gX3jhBYfQIdDRMSUfihqOMx3LtCzHN2/e7AUYAujMmTNu4sSJ3tBff/310k0xeoyfc8QhLqKNa0mDF89OnTrl4+O5njp1amlq2rzbxEdgkY4JrNINtFOYAMslEKnmpYQ3SwKYvoW1/Ysb7jw3pD4988wzpbRIs2fPnm779u0VU8OULXVi7dq1pcGVhHT9tGFlMwjYGbMMLPWwcmQ7ePBgd/ny5foTDWIiyt9///2y+sFgeObMmY5ZpFqBde/ExzuuUJ1Ae7LNY8eOuR49erg333zTD37tpWQGyOPGjSt70bT6U+lsvQRM7NJvWj+YJaSL2H2edrnePCuecxLSLVwLEEp4CO1lIUNx6NAhPyIOhZRNCyFs6azDgFDu37+/nwJmKtimgznGuTBwrYlj0iRY5zFr1qyKqWkT5BLSIcXG9o034hlxg8BFbNFBIq5ZksHvsPzz3pFOgLRI0wQd+3TK4YuGLOPAq8l6QOs4uJeEdP3EYYVQxo6nTJniRSvM8SAzi8Qa50aENDbLMg7Ss7K09Gu9lGpLhUaOHClvdB1F2p5s0/ISlnm4z+A37jvqeERFySCA95kXg+OXvasJ6bx2X2+7nJFFHc4gICGdAaYVDuMdpmF86aWX3E8//eQfmS2/OW5ClxOIqnj62BjZuj7rrPF8sR8vHbD4ccNgvzds2GBRSlsJ6RKKpu3EHSQCl/W15l2sVh71ZILGGmFsXkumq6lLJtjtKxJ0HNyXJT2svQ+DhHRIo/o+rEKBg+198cUX3vNvtslLnkWXVdiSLptRYHCMN5ryjTv9OKfYL/HwSivUJtBebJOcWl4o9/fee8/ZGnnsG2cI5ZpyftR+SsVIEWC5FIOT2FasPY4dG3ntvt52OZU3HatOQEK6Op9OfRbPcTz9bh1y7CHEiHmxiM/txME661hI8+WF+OUnro0bBvuNsI+DhHRMpPHffDGDpQB0hCzrsSU2lvKaNWv8Ob76UCSsWrXKC7v45TcEGILOBmQ24LI6V20bdyJF8tVZr8HzDzsEzyeffFLm8WeqeODAgX5gw9rmvIGpYK6PX0xlucfbb7/t75s1YLblYPJG10+9vdgmOTYhTXsQB5t1bGTtfZxmq/+OhXFWe2jM89p9ve1yq5dDkeeXkC5CrZNcg4
BFHPMlDftqR9ZXOMx7jTHGwZZ2mIHbSyp0wLb22q6xpR2IOO5P4CUpxFX89r+NoGlQLK6lo21xAnAdO3Zs8mU+ezHM1twWuQsdgonl+Hpr/BF8EtIxnWK/7V2H+fPnVwxcmQnA1uBeJJiYSollE31PPvmk/3RimH5o5+HMVhhH+5UE2ottkjMbhPGlndghYkt2rM2vfBIdyUsgr5DOa/f1tst58634WiPd0nUAw8KLTGdJw4iXKSsw9c40Li+k8QcaeBGQzvL48eNeiCN27WUijvPZJI7h2UZoc4y/aMcLZUxfhW/wm9eL9Hfv3u3zEaYrIZ1VKsWPm6efQRSsKR9EtH1HOiXKuBtrmvEwUg8++OCDivXyxOFaymzRokWl5QTULb5XzVcAskR2+DTUTX21IySSvW+eX3thEzuDt31HOosjZc4XOVjaw8wUwikO9sIZ7ztgmyaoEHx835ayTIlsG1zz0iP1SqF+AkVt0758xMxE1mxSHtukrFm6EdYrnoK+wtqJlGOl/idVzHoI2IxtPCuX1+7zlH09+VKcOwTkkb7DouX2+MoGgif+hwcLocvaSDpbAls+W8a5OD6/n3vuubKXxewlslRcRBhpWyBtvkUdxyUeDTnH5ZE2Ws3Zht7+mHu1T5pZJ881Wd4o+8Mqcbr229ZIV3sSCelqdCrP2TpmY2xb7DX+3KBdHc8IpGwMMUV5WXrxNrVG2uyZe8sbbbTr3xa1TWzGyof9VMhrm9Xix8v/UvfTscYJZAlpUs5j99XKknpTT7vc+NN0zhQkpDtnudb1VHiNBg0a5D0OfBM2/MIChkVHyPSRBTpIDBePpMXlk0h4osMvMVh8OurwD6zg2cB7Fb9YRnyuX758ufd0kjaf/yGeNSKpTt7uo20xAnTYMEcMUd4MXCgvyi0rMEDiz39X80hzLS8YskbalgxRptSbTz/9NFlX4vtJSMdEqv/GNvn+LJ8lgzW2Gw+G4xS4hhd8scssjzTXIKa3bt1aZveUK+Ubr68nvs1eyRsdE6//dxHbRCjRHlfzSJODvLZJedJuU09oJ/h0KbMRzHwotD0B6wNjjzR3zmv3ecu+7Z+uc9xBQrpzlGPup+AFQT6VxctfCOow0HHyaTQazSzPRhhf+yIgAiIgAiIgAiLQigQkpFux1IM3slkjzR9wQDwTWFt5+vRp9/zzz3shnRoFtygyPbYIiIAIiIAIiIAIlBGQkC7D0To/eGGEqXa8zln/+LJDtWn+1qGlJxUBERABERABERCBSgIS0pVMWuYIIpkXDllXx5pKBLWtZeWLDKl1zy0DRw8qAiIgAiIgAiIgAjUISEjXAKTTIiACIiACIiACIiACIpAiICGdoqJjIiACIiACIiACIiACIlCDgIR0DUA6LQIiIAIiIAIiIAIiIAIpAhLSKSo6JgIiIAIiIAIiIAIiIAI1CEhI1wCk0yIgAiIgAiIgAiIgAiKQIiAhnaKiYyIgAiIgAiIgAiIgAiJQg4CEdA1AOi0CIiACIiACIiACIiACKQIS0ikqOiYCIiACIiACIiACIiACNQhISNcApNMiIAIiIAIiIAIiIAIikCIgIZ2iomMi0AIE+MuVGzdu9H/Zkr9q2bVrVzdt2jR3+vTpwk//66+/ukmTJmX+2Xn7c/TffPNN6R5//PGH279/vxs3bpz/y5r8lc3HH3/cbd++3d2+fbsUTzvVCZw4ccKXH+UIZ/5iKeX7+++/V7+wxlkrn/Hjx/s6YnWF3wcPHiy7mriHDx8uy0f37t3dggUL3MWLF8vi6kc2gbawTbvbL7/84pYvX+769u1bslP2OXbjxg2LVtpSptStmTNnevucO3du6Zx22pbA559/7rp06eLL6d13303eLI/d055+8cUXbvjw4f6vGfOXjGl3YztO3kgHMwlISGei0QkR6LwE6KjnzJlT6khN4LLt1auXO3LkSKGHLyKkP/zww5JAC/OBoF63bp2jI1eoTuDAgQPu0UcfTZbnwo
ULGxLTH330kRdQYdnYPqJ9z549pcxdvnzZDR48OJkPOu9Lly6V4monTaCtbJO7XblyxT399NPJ8qFMZ8yY4bg/wQZFCC1s0cpcQjpdbs0+ikDu06dPVSGdx+4pz7feequsLK1MseMdO3Y0+xFaJj0J6ZYpaj2oCNwhsG3bNt+gIm5++OEH32leu3bNzZ4923eYYYd656rG9kxkDx061F24cMEndu7cOTdw4EDfYezatcsLPrwmCPkhQ4Z4UU+HopBNgHIbM2aMF7urV6/2XkUYwpNBUSx2s1OqPIP3csKECT6dnTt3lgQ5x1euXOnr0NKlS0sXItRmzZrlPVx4wum8z5w54yZOnOjrVTgTUbpIO2UE2tI2EUuIJ+ycsiKE9ta7d2938uRJf5zztA/Ep36tWbPG70tIlxVXm/zAviZPnuzbxXfeecdzjz3See2eWb9u3br5NL/99ltf7sxAmB2PHDmyVCfa5KE6caIS0p24cGs9GoZJw3ns2LGyKX463/Xr15c6zTCdeBqJadtly5Y5DD8O8fQk00ijRo1y+/btq/Aykhc8akePHvXTiHS8xEcEkH5qyjG+n37XR8AELbyZhg+DdZ5hhxqeb2TfBELYIVjHzhKEOOzevdt7YzZs2BCf0u+AAOIUj+G8efMqbBauCKFQ7AaX1ty9fv26Gz16tPdiXr16tSw+dvzQQw+5FStWlB1P/diyZYvPI+JeIZtAW9smNkh9oDzCwKDnxRdfdP369XNnz571pxgEIbJsiRX1jGslpENyzd+HOzNx2Bbto3EP203umsfuSZP2gWUiLBcJA/301KlT/bm9e/eGp7RfJwEJ6TpBdcZoGCbG+pe//MU3kDSS9o+OedOmTWWPjVEjbC1OuB02bJg7f/58Kb6NqMM4tp+asjch/d///d/u4YcfrrgHYprGQKFxAnSUdJish75161YpQTwcCC4rYxrqZgXznuDhMk8YaW/evNmXNR18HCyf6rhjMuW/EbJ0kGEniJeRJRfmUWTdOiKtSGBQjc0+8cQT7quvvnI3b970nTF1KLb7OP3Q24mHLTXgjq9p5d9W59vKNllaQ53ASbF48WI/M8SsEMu8HnzwwapLqUzQyR7btoaa55jyYYBj3GMhncfubUA8YsQIFw6IcVAh2h955BHfDsf3aNsn7TypS0h3nrLM/SQYDeKWTvK1117zAoeODw8EYgpPFAZIYCqeKXmOI34QYAjbcNr29ddfL+XB0sazTBzicg3XkgYvt5w6daoiPvlhGvH48eP+mkOHDvlp5bgBKF2ondwE8PrjjTYvJeKGJQHxAKaZjSqDMupZ7HlG/CECWcbBcg7qH50HL79MmTLFX6OOu3oRw8dmELAzZhmwIRu4smXdMuuXiwTK5P333y+rH5QlL59lvUBInsL7s9zD2pIieWiVa+6GbSKcn3nmmbLy6dmzZ8nznMXaBJ3sMYtQ48dtoEO/aYNO4x63x3ns3t5dsAEaXuhPPvnEUe6hnapsi5WhhHQxbp3iKgwTEfPee++VeXsRMi+//LLr0aOHX/bBw9qUIFN9sWcYody/f/+S8LbRL8c4FwauJQ2MN/RCmvD+r//6r9LLLlyH9wtPViNCILy/9p1fPoOQRjwjbK0xxUuFuGbal/KJG+6i7Gy5SGoNHg0667HDxjzeV+NenTx8sA880Db4gCEeZGaRWOPciP1gsyzjIL2wbPid9VIqeQrjso8dFxXz1Ql0nrMmpNvSNhFo2Dn2bmXE/ptvvlnW9sZUTdDJHmMyzflNv8uLwfHL3sY9bo/z2L0JadbG89UO2gPK3gbEX375pXeuqGyLlaWEdDFuneIqDNPWJccPxIsl4TnixtPHdo2t67PO2ozWRr8Wz7aphoH0MWzOKbQtAeusw06U9XPmXbTyadbaZMqWBjteKmRPyfQin96iAyFPzFhQdz744ANfB+fPn29RtU0QiEUrdkhniSfZbBOPf7ikJpFM5iFb0mVeSwbHeKMp07
jTjxMhD3xO0eLjmUYwKKQJtLVtIqIZ0JiAYikJDg0bTFf7wou1CxJb6bJr9Chrl2n74nbSuNOOhiGP3VufbG0+5c+g217ktnqntjYkXP++hHT9rDpdTAwzFMvhA7L+KjxHXNZT87mdOFhnHQtpvNqpTjPVMJC+hHRMtm1+81Y+SwFoTF944YWyJTbckUEU55rxYpgtCUp5o2s9nb1syHIghWwCeBexHcQQ07V4+S0wVcxXURBPzO7kDfZVFdqC8MVUBPLbb7/t75s1YA7vZbNU1kaE57R/h0Bb2+aqVat8mcUvpjI4YrCV5Swhh9ZuS0jfKa9m7sXC2ERvvLX3HfLYPeuiWR5JWiz7wpbDmWUGy5xrlvOkmVw6QloS0h2hlNooj1lCGq/F2LFjy97gtpfCaIjjYEs7zMDNaOnA6YjDYEs7EGo0zBYkpI1E22+tfMOBkt3VXgq0Nbd2vMg2LOtwGU89aZnnLF5LX8+1rRbns88+8wMfvEnxwBXu2BqdbpFgnqqUWDbR9+STTzrqTbVAebLEpBHPeLX0O8u5trZNxFqWWDZhxmAsFSSkU1SadyyvkM5j97QLzAZR9ojmMHDulVdeyawXYVztpwlISKe5tMRR8zLTQNrXG/Ag/vWvf63omJkCYhqXF9L4Aw32siEvBfJJO0az9iIZAmrJkiX+GJ4whDbHmMJfu3atn76Kv94gIX13q5zxpuzsxU7EkH1HOiXKyCFfZsG7TD1g6QXlmhVsgIUHpJbQIg3SYvkBHQTrbxGA+oMsWXTvHDevP9PC2Bd2hsfYviOdGjAZb77IwfpYXj5L/bEUPo3JuxK878AMgQl1BB/ft6VjTolsyx35YMkQIo376Os7RiZ7W9Q2Wa/OH/BgZiJrNgm7pq1etGhRaakPZcS35PFYZolscishnV1mbXnGuFMvwpDX7hHQlO+gQYP8+xSUO22FfUfaHGHhPbRfHwEJ6fo4dcpY1mDTsMb/4s9aIXIQNYibOC6/n3vuudJbxsBCcJFGKi4iLB4VW15oNBTanoB5fFPlE5d9mBsrJ66r1vBSXxBN1Jda3uiUJ4br3njjjZJwC/Og/UoC2BNCOi7PaoOReN1kyvYQzqybjdO13/Eaaev07Xy4jduIyqfQEQgUtc3QjthPBcS2vYsQlo3tx2ukwzQtTrit1gak7q9j+QmYTcVCmpTy2H01W47tOH8uW/sKCekWLv9QFFnjyFQ6L37RmMcBcYTh4pG0N74HDBjgPdHhuky7jo56wYIFjj/aQvr2Epm94GDx2FpeUp15GE/7zSNAGYcv+THAobwot6zAAIk/MVzLI20zGPV4o62zRvRRn/CWcT31TaE+ArDi+7P8OWdsE5bM+mCvWRw5zppI7DLLI83d6YC3bt1aZvfYNOtsw09YEtc6fWtPSJtlYv/4xz80KKqvKH2sIraJSMZ+qnmkSZwXDCk7a5epL7Tpn376adn6euKabVp5xlsJ6RyFWjCq2VRKSOe1e/ppZo6pJ2GfzAvBCsUJSEgXZ9fhr8Qws6Z9O/zD6QFEQAREQAREQAREoI0JSEi3MeD2nLyEdHsuHeVNBERABERABESgvROQkG7vJdSG+ZOQbkO4SloEREAEREAERKDTE5CQ7vRFnP2AEtLZbHRGBERABERABERABGoRkJCuRUjnRUAEREAEREAEREAERCBBQEI6AUWHREAEREAEREAEREAERKAWAQnpWoR0XgREQAREQAREQAREQAQSBCSkE1B0SAREQAREQAREQAREQARqEZCQrkVI50VABERABERABERABEQgQUBCOgFFh0RABERABERABERABESgFgEJ6VqEdF4EREAEREAEREAEREAEEgQkpBNQdEgEREAEREAEREAEREAEahGQkK5FSOdFQAREQAREQAREQAREIEFAQjoBRYdEQAREQAREQAREQAREoBYBCelahHReBERABERABERABERABBIEJKQTUHRIBERABERABERABERABG
oRkJCuRUjnRUAEREAEREAEREAERCBBQEI6AUWHREAEREAEREAEREAERKAWAQnpWoR0XgREQAREQAREQAREQAQSBCSkE1B0SAREQAREQAREQAREQARqEZCQrkVI50VABERABERABERABEQgQUBCOgFFh0RABERABERABERABESgFgEJ6VqEdF4EREAEREAEREAEREAEEgQkpBNQdEgEREAEREAEREAEREAEahGQkK5FSOdFQAREQAREQAREQAREIEFAQjoBRYdEQAREQAREQAREQAREoBYBCelahHReBERABERABERABERABBIEJKQTUHRIBERABERABERABERABGoRkJCuRUjnRUAEREAEREAEREAERCBBQEI6AUWHREAEREAEREAEREAERKAWAQnpWoR0XgREQAREQAREQAREQAQSBCSkE1B0SAREQAREQAREQAREQARqEZCQrkVI50VABERABERABERABEQgQUBCOgFFh0RABERABERABERABESgFgEJ6VqEdF4EREAEREAEREAEREAEEgQkpBNQdEgEREAEREAEREAEREAEahGQkK5FSOdFQAREQAREQAREQAREIEFAQjoBRYdEQAREQAREQAREQAREoBYBCelahHReBERABERABERABERABBIEJKQTUHRIBERABERABO4Vgd9++81t3LjRDRgwwN13332ua9eubtq0ae706dOFs/Trr7+6SZMm+fRIM+vfN998U7rHH3/84fbv3+/GjRvnHnjgAXf//fe7xx9/3G3fvt3dvn27FE871QnA6rvvvvMcYfjuu+9mXnDx4kW3YMEC9/DDD/sy6tWrl1u+fLn75ZdfktecOHHC1w3qCGVKnaHu/P777xXxyccXX3zhhg8f7suSMqVsDx48WBFXB+onICFdPyvFFAEREAEREIE2JYCInjNnTlLoIqqOHDlS6P5FhPSHH37oRXwsuhGD69atcwhthWwCCNevv/7aC9eQYZaQ3r17t3v00UeTZT9lyhRHGYbhwIEDmfEXLlxYJqYpq7feessL6DAv7CPCd+zYESat/RwEJKRzwFJUERABERABEWhLAtu2bfNiB6/hDz/84MXqtWvX3OzZs73AmjFjhkNsNzOYyB46dKi7cOGCT/rcuXNu4MCBrk+fPm7Xrl1elCEMEfJDhgxxiHq8oQrZBOADJwYeM2fOdK+99povwywhff78eTd9+nT31VdflXjDnjS6dOni9u7dW7oZdWLMmDF+pmD16tXuxo0bfpbA4iOO9+zZU4rPzEK3bt18eX777bc+LtesXLnS52/kyJHuypUrpfjaqZ+AhHT9rKrGPHXqlJs3b57r3r27NxQq8fjx4/20WGrUblMsVF6bMmN/3759yVG+TfdY+kz7LFu2rGy65/Lly27w4MFu7ty5FXnFcBnpHj16tHSOfY5xjmkj0iNdjJ7pnngakTzv3LnTP5dNI5EfpqHIXyqQLtNSffv2LXF59dVXS/Fhs2TJEn/Pzz77rCIJOoypU6eq0a4gowMiIAKdjYAJWtrlw4cPlz0eIgdx3bt3b3fy5Mmyc43+MPEeCjw8lHgrWSYQBzynCLsNGzbEp/Q7IED/hXi2fh2+MA05B9Ezd1esWFFxHUtw6KvRHfEyDsqM+yxdutSnST9LPMrs888/L7uP9bGxUC+LpB9VCUhIV8VT30kTsFTc+B8jQEaCYaDSM+0Sx+U3Qpj0woAHgBFpKn5okJaPvEKavCDi4/TjESqGG8ex34yMGSGHgdH1sGHDkteEeTx06JAfKb/88ssVDYKJ/VmzZlWcC++lfREQARHo6ATOnj3r+vXr59e83rp1q/Q4tK2IInNghOuYS5EK7phnE5EeeiQ3b97s225Edhwsn2E7HsfR70oCjQrpcPkF4joWvzi78EJTlvTNrIlncHb9+nU3evRoN2LECHf16tVSxvBIs0TnkUce8fFDPVGKpJ2aBCSkayKqHQEBy7QNa6FsDVM4ZWKjQktp06
ZNfiQ5aNCgsimcH3/80Xtnw8aMfQQtI08Er0274en9+OOP3UcffWTJegFexCONwfXs2dPRYCLyf/75Z/fss89WGClTRqzdO3bsWEnUMv03ceLEiriMcpmCJG1ekmGKi1ExnQONAVNRFmxEnJoqXLVqle88wikqu05bERABEehMBMxxYH0G7Txtpb14Zo6LZgoe649izzPLCBBqLOPAmYNIo3/gxTTW69InSUjnq31FhLQNdFhmQ39rAfY2O0HfygwGDi2rI2zNMWdONvpi+mD63E8++cT3+2F8lafRzbeVkM7HK1dsaxTDyllt6i6VeLXpmzi+GUt4P4uDAWct7XjqqafKDJRrmLLDwOrxfFjjEMa1Z0dk0xnUCizroGEOG3MbRbO0A8NXEAEREIHOTMDaTcQzbSEODtphlv8hBgkGtQAAIABJREFUrrds2eJ/N0tI23KRePYRxqEzJBRb4X6qr+nM5dPos1lfWW/52ew1MxGhN5p8wB6hjJPJBjaUzRNPPOHjTpgwoUJIs86er3ZwHXHpc3ECfvnll14fqDyLlbCEdDFuFVcxYuclgdhzQGUNK6eJ3cmTJ7ubN29WpBMfwOCo7HiDawVLO7yfXUM6WUI6Fd+uC7cY9datW72HnIY9bFDZD4U0+xyrdw2dNehjx44tCW/W4dGApNZOh/nSvgiIgAh0BgImpK1tpZ1lbau9g5K3Xa3FxPoXvNKpwMwq77jY0kLaY7yaH3zwge9P5s+fn7pMxzIIwJuyZVsr0N8yI/vggw8mv5BCv231hC3iGJHMzIE57JhNoG81bWDx0RSIb3tZ1OqdyrNWqaTPS0inueQ6SuNma9esoobbUKhahbYpllo3wuDidVBZ11ja4f0sLuk0IqQx6sWLF5cZbviM7KeENOvs6g00GqwpZ820vYQYr9urNy3FEwEREIGORoCXCJmuR+i88MILjpfYw7BmzZq6HSvhdal9lgnylY6UNzoVPzxmLxvmad/D61t1v14hzWzAokWLfN/PGmbEcRyYoaDfZdaCZRrhrO2lS5f8F1fMYce6aNZHE5/lHywDoY+1YC+W1uv4suu0/ZOAhHSDNQGByUtyNHx8RiZcxmCjvFDY2nKF/v37uzNnztS8O+uWqfykHVb81IUmpM14LA7HWWLRiJC2l0vwTPDlDp7bgjUOoZDm+5YPPfSQe+WVV8ri2jWprX0qiAbCPr2EuFYQAREQgVYgQP/BrFzcVvPstlbW1sU2woO+xD57lnqZsFra5JE+hi8xxUK/2nU657wnmv6cPjMr8I4S3mKccyzvyer3bTkkXuSwPyZdyhRNYmvtOc8L+zjl4iUinKOfrtdhl5XvVj4uId1g6bM8g0aFSkjlrfVCBkbBZ+YwplGjRvkXN6jIHEdYc46pGAuIS77jiVGxbs7O8cIA9wtfNrRRJ42wfSfy+PHj/j7cL26cU0Lf7htvecGwR48e/sUT+/QSDSpr9my9VSikyWf4kiTCmGfkWfFmhC8b2r3M2PGSrF271o+o6xls2PXaioAIiEBHJ2COCfoH2m/aTUS0fUc6JZx4ZpYX0lfgoay1FJB2FWdO6mtLKX7kgTYd8cYaXESa/iBLilT1Y1a2WUKafpKva7BElHXLcM8KNqOANqC/ZBkO+sO+Ix339whodAofOWBdNXHDjyLYFz6y7qfj2QQkpLPZ1H3GjAOxmvoXeqRJlM/CITJTcRGleJAtYEjr16/3L5uk4ocGSVwT6WFc1tmlvBx5hLStuQrTjfdDIU3+MdzUmnGui5nY89qUIXFYG1itIbFrtBUBERCBzkLAPL5x+8pvPidK/5EKtKl2TVb7ynXWTyCGa3mjwzQtba574403KrygqTy1+rF6+s2wrFK8jTvblDhOLStNDXRwVGV9dpeZZgZiCsUISEgX41Z2FWuT8LDaCxmIR0QgyxtYlxQail1IYxn+oRLELn8EhW9Ox+KR3xznD7yY0XAvriedMPCbtVXEw5gQ7HwMHp
EbG2EeIc09GC2zbs/yMGDAAO+VsJFuLKS5Bo96+BImbGr9ARdEPyNnRLWCCIiACLQaAesfwj6FdjN0ssRMEEK0ybU80raErh5vtAk7+hLSpm/h+riPivOi338SaGshbdoA7YCGoJx4r4g+OVVGaBWWi1CWCHP6ct7Xiv/4msovHwEJ6Xy8FLuNCSDWedNY00xtDFrJi4AIiIAIiIAINExAQrphhEqgGQRYr4VHhdE0o+SUd7sZ91EaIiACIiACIiACItAsAhLSzSKpdAoRiKe+Umu7CiWsi0RABERABERABESgjQlISLcxYCVfnYAJaVvbZR+Ur36VzoqACIiACIiACIjAvScgIX3vy0A5EAEREAEREAEREAER6IAEJKQ7YKEpyyIgAiIgAiIgAiIgAveegIT0vS8D5UAEREAEREAEREAERKADEpCQ7oCFpiyLgAiIgAiIgAiIgAjcewIS0ve+DJQDERABERABERABERCBDkhAQroDFpqyLAIiIAIiIAIiIAIicO8JSEjf+zJQDkRABERABERABERABDogAQnpDlhoyrIIiIAIiIAIiIAIiMC9JyAhfe/LQDkQAREQAREQAREQARHogAQkpDtgoSnLItAMAr/99pvbuHGjGzBggLvvvvtc165d3bRp09zp06cLJ29/qZL0qv375ptvSvf4448/3P79+924cePcAw884Pgrl48//rjbvn27u337dimedqoTOHHihC8/yhH2lCvl+/vvv1e/sMpZyqlaOXJu0qRJjnK38Msvv7jly5e7Xr16+WsffvhhN2vWLHfx4kWLom0NArLNGoA62Gnase+++863cbRv7777buYTYCcLFixw2A32hR1hT9hVKuSxe/LBXw8ePny4b2dpb2l3Dx48mEpax+okICFdJyhFE4HORICOes6cOUmRRMN95MiRQo9bREh/+OGHXsTHgo0OZ926dQ6hrVCdwIEDB9yjjz6aLM+FCxcWFtN5hfTly5fd2LFjk/kYNmyYO3/+fPUH0Vkn2+w8lQDh+vXXX3vhGrZvWUJ69+7dmXY8ZcqUsgErlPLYPe3oW2+95QV0mBf2GXzv2LGj84C/y08iIX2Xget2ItAeCGzbts03qHgmfvjhBy9Wr1275mbPnu1F0IwZM3yH3sy8msgeOnSou3Dhgk/63LlzbuDAga5Pnz5u165dXvDR+SDkhwwZ4r0xeFwUsglQbmPGjPHe/NWrV7sbN254Tz48GRTRSe7Zsyc7gYJn6Hi7dOlS5l1btWqVr1dz5851V65c8SnjSVu5cqU/vmTJEg2MavCWbdYA1IFO03ZhgzgFZs6c6V577TXfvmYJaQaa06dPd1999VWpLTQ7xtb27t1bevq8ds+sX7du3Xxb++233/o2grbCbHPkyJElmy3dRDt1EZCQjjBZZ09HQAXu2bOnn2Kh06BzZ7qUKZfNmzeXXXnq1Ck3b9481717d28odF7jx4/3U9Ypj5pNsVB5bTqb/X379pV1NEePHvUjVAyPDmnZsmX+/hgmUzLxNHw8Tc5os2/fvt6zh6ejkUCed+7c6Z/Lpo95XqahwmlbazzwTKWmoxh10yjEnSrXTZw4Memd5DnC5QCNPEerX2t1HA/m4cOHy3AgfhDXvXv3didPniw71+gPEwhhJ4JdUbYsQYiD1ZMNGzbEp/Q7IIBd0B7Q/sTLOOAK36VLlwZXNL5rnXg4KMLWJ0yY4OuPiWi7E23P1KlT/eAoPmdxtHXe48hSGdlm56gN1HvEs/XrtH3YY9gG1vOkK1asqLguj92jC2gf6Hc///zzsluabcZCvSySflQlICEd4TGRQQfRv39/X3mp+E8++aRft8k+/0aMGOGuXr3qr2Y6c/DgwaW4FoctI0BGgmGgs2O6NYxn+6RDehZMSBMfoW3xbBuOIjEWpsLpVO18uJ08eXJS2Nq9am2rTfPiEaNzJfB8rInk2Q8dOlSWLHlEQMfnGKQwcg/zG+9LSJehLPzj7Nmzrl
+/fn497a1bt0rpUH4ILhskNZO3CS9EeiikGJBSzojsOFg+GdQqZBOgk407QQa9eKHhDd94HXN2avWdsUFROACiPaRdHD16tLt+/XpFQuQTgUibppAmYHWedxVkm2lGHfloo0I6XH6Rx+6xR+wy1C1wxCONZnjkkUd8O5FX4Hfksmhm3iWkI5ompOl8qHg0bHhZ+M0avzNnzrgXX3zR9ejRwx07dsxfjfBl2oa1UFxPCKdMYm/Qpk2bvNgdNGhQ2RTOjz/+6EVmKDRMSHN/vON0YAjVn3/+2T377LNlHajFRZDiOSZeOE2OwP7444+jJ67/Jx561tXy3KRNYGoeL3LckZs3EWMPw08//eQHJHinzENuwhsBt3bt2tLUtE3vIwYuXboUJqP9BghYPbF6iSeRJQH2cgt1jX/NbFStzofCi0dgqpK6wzIOypv6Sn3g5RfWBFJnJaSrFzZ8bAaBgSqzDAxsrRzZxgP06ilWP0v7xAA+HhQh/BCAlBnTxTYbRXw6a9ovCenqbGWb1fl09LNFhLQ5IVgCR39rIY/dm7PPBmj0vZ988om3ybCdUFtrdPNtJaQjXiak6dxt9EflonMwrxm/6+kQrFEMK6eln5q6i7Lif1oaTz31VJkRcZIpb4zAPIeI1jCfYXq2Purll18uieDwfCP71jhYPkiLzpOONh4BEyfOozEh/j//+c+yrCDw6mFddpF+VCVgdQq2CFsEDvWIJUaI6y1btjRVSFtdCGdPLIM06KzHDhvzeD+0H7tO2zsE4INQxgNtgw8YPvHEE74NwxHQTCFNncGGGRzFgTzYjEZcjvyWLcfEyn/LNst5dLZf1lfW66TAqcBsNDZlesSY5LF7E9K8A8NXO2gPsEfsGCfgl19+6W1Tba3RzbeVkI54magLOx6rsLbkgt9xh4A3jZcEYq8elTWsnFahWWZx8+bN6O6VP61hDdOojPXnEeKYZyqOY/dtZIoXo966dav3RiG64o4yFNLcnxePQk+1eZ7jkbV5smgsGBzwG88knwvCa88yBGYGFJpDwOqUlR9lyfo5W+dOOXKuWWuT6TSyhBdPxOxN+Lk06gGekw8++MDb2fz585vz4J00FezeypItbRedJTZk7RkefwY0jQZeEmXZW2pQZGkzm2CfMiQ/vFeCR/qll14qm8mz+NreISDbvMOiM+7lEdL0l/ShDz74oLcfZpvCkMfurf+3doL2mEG3vcht9U5tbUi4/n0J6YiVdTx5hDTCo5oXJhTBVqFtiiW6fcVPq+BhGhWR/u8AcbJEp923qJDGqBcvXlzWYZtR2jYW0vbSob1UaF9osN/hc7CW2tZpWXpsMXg64bgRCa/Vfj4CvETIgAu2L7zwguNF2TCsWbPGn2MpT6OhHuGVdQ9bHhS/2JsVv1WPM4uArTCzwHStLZmCB0uiGLjWO3CvxbDWoCjrevLEcq54hiorfqsel2127pKvV0hjL4sWLfKOKPo/BsVxyGP39v4C7QTLvlj+Ffap9tJ3s5wncV47+28J6aiE8wppBCbLJRAl4bpAkk2JYFv0z4uMrLeuFVJpZF2DYZGPzz77rCKKLe3A8xgaUEXEjAP2Eky4/tqiWuMQC2nzQNtaSpbGxC8ZkoaJdLz8eLoYlPAc+qMcRri5W9au8kWVeFaFu9h6vKyZjTw5oZ7Zp5VsWVS915NHxB9fnImFfr1ptEo87B17wZuELYUB7pyjbWg00F7RboUvFtebJh01dv3666/Xe0lLxpNtdu5it76SbVbg/Se8xdgLy6iy+us8dm99cbhk1e7PuVdeeaVs9tjOaVsfAQnpiFNeIc3yDDp8KiidVq2XpTAKPmHHyHDUqFH+pSoqMsfpqDgXTsHmEdLmwYu/ycu6RZZIkEfiFAm8YMgLlkwR22fRaPRZT2vrrWIhzX24Hw0CHSkDjvAlQ8uHecv50gcetNQb/xZX2+YQsAadOnj8+HFf/xDR9h3plCjjznznlMEOS5hYepHVyBM3r/
AiLeo+HQTrexGAmo2oXd7m9cfOwpd17fuzqQETqcJ7/fr1fm38M888U/WFXuLmHRSxRItZKT6PyfIh/UGW2mVJjKK2yfJC2n5mJmrNJsk26yuLZseyss0S0sza8pED2lfWLVdrX/PaPX0wGgAtgCZAq4QfRSg6W91sRh0xPQnpqNTyCmkuN+NAHKf+xcsyTIyk4oZLSkg7j5BGkGd9Vg9RwtIM4hQJxiWVZzuWEtLmYeELIyw7SXnLEXB8XtDSCbeIA5bBxN/LLvIMuuYOAfP4hqxtv5rgCet6tYaXDoBBIfWuljca+7B725br3njjjcL19c6TtsaeeXyNn22rDUZsAGtxU/Zr9BDEiLRa3mhrryxN29J584d/FGoTKGqboR3FfU54V9lmSKNt9+vpN8OyCsvQbCfcxoPiPHZfTR8w08xATKEYAQnpiJtV/FDQUrnj32GFZj0TX0Cw7yAzmmQJBX++kzWBoaHY7WgsecGKqWsMBY8NL+iwBCMchVrHlErD0gq3GAvTQSyLoBPlH0sr7OWjMG7efUbLrKlF3JJne4nIRrpZHTH5Ib4t8Ujdl5cYeamCP/Dy2GOPVQir+AXFVBo6lo+A1cGw3uI9RGBlBQaBTz/9dE2PtK2PryW8uI91HtRV6hRrA7k+tIOs/Oj4nwRgRdthL/mZ3WObWRw5zppI7LmaR5p4vNdAmrUGRdZeYe+0gywh4ho8Xwr1Eyhimwgh7KeWR1q2WX85NBrT9AT2kPUv7NutLcyKG+oO8pbX7tEq9MfUE+4hR1WjJfzn9RLSzeGoVBog8P333/tBCI0Ihh4GpvrtO95ZQj2Mr30REAEREAEREAERuFsEJKTvFmndJ5OALRfghQf+YIt50FhjyVo/XnCKR+KZiemECIiACIiACIiACNwlAhLSdwm0bpNNoNYfcWBKmWUwRdd3Z99ZZ0RABERABERABESgOAEJ6eLsdGWTCOCB5ruW/EEb1kjb+jDWWPKVj3jdeJNuq2REQAREQAREQAREoCECEtIN4dPFIiACIiACIiACIiACrUpAQrpVS17PLQIiIAIiIAIiIAIi0BABCemG8OliERABERABERABERCBViUgId2qJa/nFgEREAEREAEREAERaIiAhHRD+HSxCIiACIiACIiACIhAqxKQkG7Vktdzi4AIiIAIiIAIiIAINERAQrohfLpYBERABERABERABESgVQm0nJD+9ddf3fPPP+/4RvGOHTtatdz13CIgAiIgAiIgAiIgAg0SaDkhffToUf/npvmjH3Pnzm0Q35+X8yeu2/ufsOZZ23sem1IYSkQEREAEREAEREAE7hKBlhPSbeGRLiKkz5075/7t3/7NLVy48K4UtYT0XcHcoW7y22+/uY0bN7oBAwb4vybZtWtXN23aNHf69OnCz4F9TZo0qfTXKe2vVMbbb775pnQP/rIlf71y3Lhx7oEHHnD8SfjHH3/cbd++3d2+fbsUTzvVCZw4ccKXH+UIb8qV8v3999+rX1jlLOUUl138m/Km3C388ssvbvny5a5Xr17+Wmb/Zs2a5S5evGhRtK1BQLZZA1AHO0079t133/k2jvYNzZAVsJMFCxb4WXNsDTvCnrCrVMhj9+Tjiy++cMOHD/ftLO0t7e7BgwdTSetYnQRaTkjXySVXtCJC2jzjzfKK18qwhHQtQq11no56zpw5SZFEw33kyJFCQIoI6Q8//NCZ+AtFGh3OunXrHEJboTqBAwcOlGbaQobsM1gvKqbzCunLly+7sWPHJuvVsGHD3Pnz56s/iM462WbnqQQI16+//toL19Aus4T07t27M+14ypQpZQNWKOWxe9rRt956ywvoMC/s0/5qqWvxeichXZxd6UoJ6RIK7XQQAtu2bfMNKp6JH374wYvVa9euudmzZ3sRNGPGDN+hN/NxTGQPHTrUXbhwwSfNzMzAgQNdnz593K5du7zgo/NByA8ZMsR7Y/C4KGQToNzGjBnjvfmrV692N27c8J
58eDIoopPcs2dPdgIFz9DxdunSpcy7tmrVKl+vGLhfuXLFp4wnbeXKlf74kiVLNDCqwVu2WQNQBzpN24UN4hSYOXOme+2113z7miWkGWhOnz7dffXVV6W20OwYW9u7d2/p6fPaPbN+3bp1823tt99+69sI2gqzzZEjR5ZstnQT7dRFoF0I6bNnz7p+/fpVXbNsHtylS5eWHiw1TTFx4kSX6nhTnpVwermU6P/tMHrbt2+fGzVqlO+g4hFcuN7YhDSdP9PR//qv/+qNhe0nn3xS6jjw1gwePDjprQnTj/OVmuZ79dVXM6dKeX6MkSlVG23+y7/8S8NrpOG9c+dON378+JIHsXv37n4aKpy2tcYDz1RqOopRN41C3KlyHeWX8k7yHDGXuMz0uz4CJmipw4cPHy67CPGDuO7du7c7efJk2blGf5hACDsRxBhlyxKEOFg92bBhQ3xKvwMC2AUd9bx58yo8z3CFb9huBpcW3rVOPBwUYesTJkzw9cdEtN2ANmzq1Kl+cBSfszjaOu9xZKmMbLNz1AbqPeIZLYGmoO3DHsM2sJ4nXbFiRcV1eeyee9M+0O9+/vnnZbc024yFelkk/ahKoF0I6atXr7oRI0a4yZMnu5s3byYzzEiMgt68ebM/z1Ql64boQKiY4T+EWCy6+B3GYT+OE96YDj5L0HFtLKQfeugh9+STT1bkJ/QGFRHSiJ5XXnmlIu/kgZHu999/H2bbT8+YgI6fN8xz2UV1/kgxtHvgEaNzJVA2rIlk9Hvo0KGy1DFoBHR8jkEIz2PppbbVyqvsJvpRlYANXFkPfevWrVJcyg/BZfW+mbxNeCHSQyGFPVPWiOw4WD7v1vKn+P4d5TedbNwJMujFCw1v+MbrmBt9NhsUhQMga8dHjx7trl+/XnEL8tloG1SRaCc7YHVettnJCvb/HqdRIR0uv8hj99gjdonOwk4t4JFm+dwjjzzi24m8At/SafVtuxDSiGdEdNgAU0mY8mXql0AFQjQzzUGgk6DDf+aZZ7wHGoGGgPvyyy+9IMvyhnKtCcIsoYBnhetp9OkwSJd/eGMRe/FaJTMOOiymxhENxGealWPz58/3eQ7/Mw97LZGwadMm/9x4oG06nMr//vvv++dHsHIvgnkTEdIfffRRSSQhxv/93/+94U4M9qyrPXbsWOmelA9e5LgjN28i5RiGn376yb9IhneKkTCB/PMclOfatWtLU9M2vY8YuHTpUpiM9hsgYHXPvJTUd+pqPABrZqNq9TgUXjyCDZBZxkF5IwCpD7z8gp1h87VspAEUneJS+NgMAu0gswwMbGl77B8zYQzkmxFoZ5gGjgdFDMoQgJQZ08U2G0V8OuuePXs23AY1I//tOQ3ZZnsuncbzZlohT9tqTohQD5GTPHZvTjwboNH3MluOTVobwVZtbbEybhdCmqwjNq2xt6lnGmQTu1S80JuBCEDUppZxIN569OjhBV8KSy0hbZXuueeeK4k90qGTR9BaPi1t8kZeFy9eXBKYnGNqnA4uVTmtwUyds3StY4o7LM4jNl5++eWyqVKei3ywTjEO3CfkF59v5Lc1DlZWpGWiPh4BWx5DD6SVN8/5z3/+sywrCLy2ynfZjVroh9U92CJsrTHlDW7sasuWLb5xzdPYV8NndSG1Bo8GnfXYYWMe71ezkWr3bZVz8KFNwrlggw8YPvHEE94BwXKLuM1qhA11hnaGwVEczMERl6H9li3HxMp/yzbLeXS2X9ZX1tu20s/zsjBOptAbDZc8dm+aBkcfX+2gPcAmsWPWbuOAxDbV1harce1GSFOxrJFFgD711FP+syys68HLQgdvnYF5sK1xTm2pIOa9jtHUEtI2RUl+qLxUZv6xz7HQc07aYd7De1nlTVVOazBT5ywNBAieutTz2bFwwEA+sp6b+xhfSz/vFgZbt2713ihEl+XBtqGQJm0Efeip5no8z/HI2gYMNBash+U3gxY+FzRo0CC/fp4pT4XmELC6Z+VGWWJnts
7d7KNZa5OtXqaEF0/EDEv4uTTqAZ6TDz74wNfZ1IxOc0h0jlSwbStLtrSTdJbYkA1SaUdoTxoNzIqxLjo1KLK0mU2wTxmSHz7Dh0f6pZdequrgsOtbeSvb7NylT1uITbCtFegv6UMffPDB5NeL8ti9aRFrJ9AJDLrNEWn1Tm1trVJJn283QprO20QX3ko6UjxjeDTpAKg0ts7POgerFKltlqAEgwmFWPiFiD777DNfgeO0UyNDjCIlUq3ykvc4WMVNnbO4dn2ch/B3LKSNoaVhW+6TyqOdr7XFqPG4h/eO92Oe9tKhvVRoX2iw3+E9WUtt67TCdClHfQItJNX4vs2UwPaFF15wp06dKkt0zZo1mQOysoh1/KhHeGUlY8uD7L2IrHitfhwnAzbDzALTtbZkCi4siWLgWu39kzz8ag2KstIiTyznimeosuK36nHZZucu+XqFNPayaNEir4no/xgUxyGP3ZtzkHaCZV8s/8JBaQEnIeea5TyxdFtl226EtAlLRDTeMSocXki8K3wKBhHNtIQVPsKQL30U8VTWEtIIdUZrLMtguQEeO/7xBQ97+zasIG0lpG3AEHvAw3uH+1kvbvFJHb7h2oiQtpdgWE7DWnGEtQVrHGIhbR5oW5pC2cYvGZKGiXTyh6eLwQoiT3+Uwwg3dxu+A4DdhcHW49ma2/Bc3n1s1T6tFC7lqScd8oj469u3b4XQr+f6VorDoB97wZsU2iUM4M45Ot1Gw5kzZ1z//v19R0w9yRPoqLHr119/Pc9lLRdXttm5i9z6SrZZ4eeff/b6A3thGZVpnjh+Hru3vhhHG7YYBs7xQYMsJ1wYV/tpAu1GSJvnhM4AwYqHkgJmHTDeaaYTwxfXrBLxolv48lv6McuP1hLSJuoRAXiFEbTVQhEhbZ4HhClLUFIjTu5p32X961//6gcNWfGICzOEKtO4x48f92myZpHlEYw2GxHSMMb7TdrknUCjz6yBrbeKhTRx8CqaF5+yDF8y9Ik45xmTBss+qAepN/4trrbNIWANOrZGXaGxRhzZd6RToow7MyhjsMOLiSy9yGrkiZtXeJEWs0/YNut7EYCajahd3ub1x87Cl3VpV2hfsuwe3uvXr/dOAl7arvZCL3HzDopYosWsFH+lDUeE/iBL7bIkRlHb5GVdvsfOzETWskbLgWzTSNzdrZUt21Rg1hbHGe0r65arta957R4BjVhGD6AL0BLhd6Rtxj+VLx2rTqDdCGlEGS/FIPhCD6x5WTkeTvEibp9//nkfn3Pxv7BSmGc3jhP+Dis2U914wsLztk8Fp2NAYFvg2lRnZUszUss3qr1kFQpSOjc+q2f3j7dh2llp0rgyhZ/Koz1DrW20EIn3AAAEFUlEQVQ9DMN8W3rmYXn22Wf9DAIiKQ4IuKxnRBwwkGrkz1bH99PvPwdBeHzj+sTvaoLHOgLihTYWM6UDWLZsmRfDtbzR1OE4H4joN954o8LDGt9Hv/8kYB7fFMeswYi1T3ZNyn6NL4KYdiT8zKWdC7fmhLA0bUvnzR/+UahNgDaziG2GdhT2C/EdZZsxkbb7XU+/GZZVWIZmO+E27sPz2D2OSV5cDNOzfQbcDMQUihFoN0LavM8UbDgNaZ7b1LQDwpGpD5YA0PFapWAbdvL1VOZQSNPQ4Akmnccee8zxR0fCtNkPP69XREhTXHRkiPI4/bhDo2HlZayUuA+NkDSZFsKriADln/3hlqw85qk2jJYR5KQLA3uJyEa6cb4tbcqI+LbEw46HW15i5KUKWMA85h2/oBheq/1iBKxe0YjCOzVIjFPGI/3000/X9Ejb+vhawov0rfPAhqlTrA3k+mremDhfrf4bVvzlMnvJD5bYG7aZxZHjrInEnqt5pInHew2kWWtQFApp6hPtJNfg+VKon0AR20QIYT+1PNKyzfrLodGY9WiPsA+3tjDu/+x3LKTz2r1pJuoJaWL7clQ1WsrOtRsh3fijNC8FBCEV7O23365YcoGwZ3lDXK
Gbd/fWS4k/KoOYoxHB0MPAVL/NVGQJ9TC+9kVABERABERABETgbhGQkE6QtlEhXuDwrwAxuvzf//1fvw4ZLzh/XEShcQK2XIAXHmBqHjTWWLLWjxecNHBpnLNSEAEREAEREAERaC4BCekET/srbDadEm/xVn/44YeJK3WoCIFaf8SBKWUGNSz/URABERABERABERCB9kJAQjpRErzNyifeWFLAOj8T0qzfZc0xa8wUmkcADzTftZw+fXrZenHY85UP1n6al7p5d1VKIiACIiACIiACItAYAQnpxvjpahEQAREQAREQAREQgRYlICHdogWvxxYBERABERABERABEWiMgIR0Y/x0tQiIgAiIgAiIgAiIQIsSkJBu0YLXY4uACIiACIiACIiACDRGQEK6MX66WgREQAREQAREQAREoEUJSEi3aMHrsUVABERABERABERABBojICHdGD9dLQIiIAIiIAIiIAIi0KIEJKRbtOD12CIgAiIgAiIgAiIgAo0RkJBujJ+uFgEREAEREAEREAERaFECEtItWvB6bBEQAREQAREQAREQgcYISEg3xk9Xi4AIiIAIiIAIiIAItCgBCekWLXg9tgiIgAiIgAiIgAiIQGMEJKQb46erRUAEREAEREAEREAEWpSAhHSLFrweWwREQAREQAREQAREoDECEtKN8dPVIiACIiACIiACIiACLUpAQrpFC16PLQIiIAIiIAIiIAIi0BgBCenG+OlqERABERABERABERCBFiXw/wHGGL2tDp4GOgAAAABJRU5ErkJggg==)You can achieve these results or even better on this dataset with test data:![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAvkAAAETCAYAAACsixkBAAAgAElEQVR4Ae2d+88VRZ7/91/xBxNj+IFsjCGGACEDGcIlEIkQJjCKCZcosIaAcpF1EFkgAdSBZWZZWCIoAQIz4oDKKoIuSkAhBBgiKPdwVRkQBQlQ37zK7+dMnTrV5/7wnPP0uxLoPt3V1VWvT1fVuz5V3c+/OAUREAEREAEREAEREAEREIEuReBfulRpVBgREAEREAEREAEREAEREAEnka+HQAREQAREQAREQAREQAS6GAGJ/C5mUBVHBERABERABERABERABCTy9QyIgAiIgAiIgAiIgAiIQBcjIJHfxQyq4oiACIiACIiACIiACIiARL6eAREQAREQAREQAREQARHoYgQk8ruYQVUcERABERABERABERABEZDI1zMgAiIgAiIgAiIgAiIgAl2MgER+FzOoiiMCIiACIiACIiACIiACEvl6BkRABERABERABERABESgixGQyO9iBlVxREAEREAEREAEREAEREAiX8+ACHQSge+++849/fTTrn///u748eNNzcXPP//sXnrpJff444+7PXv2NDVtJSYCIiACIiACItD6BCTyW99GymEXJbB371730EMP+X8bN25sailPnDjhevbs6dNeuHBhU9NWYq1JIHye7LniWCowCJwyZUrh+SM+vznemeHWrVtu1apVbsCAAUV5yypHZ+ZV9xYBERCBVicgkd/qFlL+uiwBefK7rGk7pWDtLvLv3r3rXnvttSJxnzVYuXfvnvv73//uJk6c6B5++GHX7EFypxhQNxWBgMDnn3/unnzySbdv377gqHZFoDYCEvm18VJsERABEWh5Aib4q/GAm1e/sz35Nvv0zDPPuHPnzrn79++XcEbcf/HFF27UqFFFgwGJ/BJUOtDmBHimGeRWU4fbvKjKfgcSkMjvQLhKWgREQAQ6g0A7inzL8/bt2zORnTlzxvXt29d77+fMmePefPNNL4Qk8jOR6USbEpDIb1PDtVi2JfJbzCDKTnUEfvjhBzds2DC3fPlyd/78eTdjxgzXvXt398gjj7jJkyf7Y2FK5iWk4fzpp5
/cihUr/EupTPUz5U8acUBQWLp4VAYPHuw++eQThzcxFa5eveqWLFnievXq5YUHL71yH+4XBmu8bSkCa+fJX1ZgnfKmTZv8/bmGclLG1Mu6JpQsbbblPEG//PKL27p1ayFt+I0dO9YdOnSoyJNq3l7W91POV1991ecDfmPGjEnmJas8rX6cMvJsff/9954D5aOcTzzxhNu8ebNjWUkYYobYBz5wSgWen08//dRzg7cxjJlzLc/5n//858IadeKTH67Peg65zp6Dcra3vJltO9uTX02eYY2wN1ZWl9g2K1CvwnrP+wHvvPOOox6mQrX1npmJw4cP+/YGO1I3SZu6TbnCYO0bzyLniMPghmuYxSCdOFg+aHeIR/xU2vF1nf374sWLbtGiRb49Jt+0n8uWLXPXr18vylrIpOiEc365VtyO8kz069fPnTx5sqiNg0tcjy3tavsT7k+7zvsjZhdrl+k34lBtm2J1AA5Z/2ibyK+CCFRDQCK/GkqK03IErFFmzaI1smGjiBC6du1aId8m8pcuXepFUhiX/Tj+V199VXhxNY77xhtvlAg9hEEqH1wbCxATJpZu3DkVMu2c7+BnzZqVbPBToizVSWQJPTqpqVOnJtNGeCIQbMmECcE+ffq43/zmNyXXjBgxwl25ciXMetvu0yEPGTLED9BMjJmt4LJt27ZC2eAyd+7cEh7E53n45ptvCnHZYYDAM2jphdu48zbmYRzbJx87d+4sSjv8Yc9Blu3DuHaf1PMUxuuIfVhbmbK25fJldSmuY/Xm9cCBA37wmspLimW19Z56RH3Cbqm0qYehM8Dat5kzZ7oXX3yx5Bq+yBWKydOnT7uhQ4eWxONe06ZN6/QXqrPsQRkoS4pJbFNjwjMTB+LG7SjHHnvsMfe73/2uJP24Hlva1fYnly9fdiNHjixJl3Ig9uMvmlXbpli9TfGwY3E7EbPQbxEICUjkhzS03zYErFGm4aNzQ5QjoBD2L7zwgm98QxFkIp/4CFXOEf/HH3/08bt16+YOHjzoy08aiH5E2meffebj0UnTIT333HOuR48e7tixYwVWFp+OAwFnYpdO+4MPPnDvv/9+IW68k+qcwjh4oXr37u0Q+iYCyPfRo0e9hxeBlhWsw0iJE67h3vBgVuDChQte0N+5c8exXIKOCi8ja6MJJgSJj6eQchnvSZMmefGyf//+rKy01XETntiTJSGXLl3ybHbv3u254OWFEwHBTzw892Z3PL7vvvuujztv3jzPyQBYfJ5ZXqyDIR55RBrey3BgCnPysmPHjoJXk/g8T9gnzIelb9tKtrd4bM225cR0GL+Z+8aa5yrrX7l82TPMthlh8eLFXhjyLNtMCTZZt25doX2w+9RS7639oU3hRUqzO4OE4cOH+2eIOmUhbN+ob5SP5wqvPnmElS1r4hjtA88Ez53NOPA88lzyfO7atcuSbqntli1bfFmoFzAhZLWbxoRnJg7wSYl8OFF+Zn+wFzZlNhZW48aNczdv3vRJWdrEr9SfkE+zwfz58wt1lnyvWbPG349PI9+4caOQTXvOq2lT7CJ7trPab4unrQiUIyCRX46OzrUsAWuUeUmPr9SE4ciRI76jpiG2YJ0s8RFtYbCOxhpTxD6in2UscbC0ucYC19F4M+VsHZWdq7RNdU7hNeQVrzIdjy1RCM+X2y8n9Ojc6OQGDRrkBX6YDgMaOis6PBsomRCkI42/9oDYIK7xC9Nqx306ZOyPjU3oUQ5EAoLMRCdCH6HN8gnOhYHn4JVXXvHx7VzI8Ouvvw6j17Rvz77lI3VxOdvH8S1f5dKLr+mI37Xk2e5vQohtVrA4PKPhv5RYXLt2bUEU2qA6K13yW229X716tY9r9SlMk6U3OA54Xqz9MBtTP+NnhQEI97Uynz171i9LYZBoM2+WPkthBg4c6EWpHWulLYMPyjJ79uzM5W2WX2OSshssUiKfevzXv/61iIvVTZwnOFEIlnY1/YkxHT9+fJGQJx3SZmaPGQ
T6CgvVtikWn609t12lXQ3Lpv0HR0Ai/8Gx1p2aSMAa5VSDj+hHGIfnTOSHx7KyY6I/FATxfjiAoDGmo6rHk53qnOJ8MfUbrrN9/fXX/axC3KHH15UTTcYvyxts15I/QjkhaHG7SmfEMxILhpgtv030x89G+DslJFiacfv27VSSJcdsvbK95xGmXU6U12KTcrYtyVAHHqglz5YNE0L2nNrxcGtxQnbsp9oChL0tj2GpFgNhhLl5x+N0q6333Iv14QjyOFhdDO2ZOhZfZ79N9MflC3/X8sxZug9iiyhm+SMc+ceAmfXyoRfc8mFMUnbDxnGdTR2ztNavX18Uv1zacX9ifQnr91PBnrewPay2TQnTS6UTnte+CFRDQCK/GkqK03IEyjXK5mkJOwNrmMNjWYWyxjXsJOP9WOSHy32y0k0dL9cRhfFtChvRYevEESPlvI3lRJPxC72H4f3sWvJHKCcELW7YqYVptdt+tR2yMYyfjfB3SuRnDaxiTqznz3rPg3uEojC+thablLNtnG5H/q4lz5YPq6v2nNrxRrYMnmkvGEwbfwbZ8Tpr7lltveeZwqNO2xQHe45Ce6aOxdfZb+MWPnfxfquKfCsDywV5iZWPG5B32jjEfujIMCapNhxb1CLymVkJ45dLO+5PrC8hjVSwZzJsD6ttU8L0UumE57UvAtUQkMivhpLitByBco2yTQGHjbA1zKkOIi6cdZqpqfU4Lr+JR8fEEpewU0rFjY+lOqc4TvwbYc9aUO5p63LjOPy2coSdjcXjyxWjR4/2Mx7x8iXKYGtL7dpyQrDcfex+7bSttkM2JuHa3nLlLLdEKnUdzy82Dtf9Es+e/VAUxtfXYhMrR7n04vQ74nctebb7mxBi2xGBusD7Poh96kv4xZda6j1eXzzVqbXxtlyH5X7WflRjYyuvtW1he2fn2nF76tQpv8wtnvkwJvGAheO8VxSKdsqd1bbSfk6YMKFo0GVpp/qHuD+x5VGkETtZbLlOnJdq25TQXvZsWxscntO+CFRLQCK/WlKK11IErFHmhTOWTdA50sD+3//9n/9aQ9zIWkeYasTjgtk6eL76wKcKEUHlgn0hgpe5eEHP1mCzZhsh0MiLtwgM8szaUcpHYPvxxx97L2I5cVNONMGLNbyISDpNe/GWZQkbNmzwL6aFa83LCcFy9ynHrVXP1dIh2xpu/lIrHr9wDX9cPpjzSVWY85lSXp7GlhyHP+fs2eFae7mPZ4p4pM0LugsWLPCeznKivBablLNtXIaO/F1Lni0fJoTK1QOLW2kLB5iHLzpzDZ9S5eXy+KsmtdR76jFef9oUlteYPfmaD+/bcI44Fqx9K2dji2uildkG3iMKByIWp1W3K1eu9F8dgjH1gEB5eMbjNtwcExz/8ssvfX1gQEBdok7F8XkmWBvPc2UvyvMyMnWVARe2xg4E411Nf8KLztOnT/dp0FZYnWWJES/4kjbniWehljbFrrF3nRjAxI4Yi6OtCFQiIJFfiZDOtyQBa5Rp3ON/NLLh5x8pQC0in/h/+ctfCsti4vTjzoTOiallW0YTxw8FSLl823V0ShZM+Ni5cIt3MfxEo4m1ME68H+al3Gfg4uUJlnZKdFgew3xb/ttxW0uHzHpdvqQRc7bfpBUGmPPlJjsfbmMRyfIQxF8YJ9wPbWH2Cc/H+6HtzWZxHPsd5yUsQ0ftW57KPUfVlDNmXm1+K6Udfzq3lnqPmMz6dCrtVZy2tROhjcuVA2b23o7ZMNyWY1ou3Y4+h63CfIb78ac/4W2D5DAe7S5e9bhd5nkP44X7fP6SumjBeIdxbD/Vn5T7dCoDOc6HoZY2xa6zQaTlw7adUTctT9q2HwGJ/PazmXIceF6s4WOLJ/3555/3fyjGvEIGq1aRz/VMo5Me6Yb3iTsT7pGKjwhnnWk4pVuuM7F7hB0y3qCPPvrIC0MbRPASJn90K/5jS5VECumHQo98k5/wD3hRVtaM08GEwdJOiQ7yS9phvsNr22
2/1g4Z+2JnPjlqNrRtSnDG8bErf5CN5y18bvHcMwtk65TNNsR76aWXitbkm33svqltaHuzWSoexzpDSFieyj1H1ZQzxbzaZ5BPxrJsxl50xjYMyrL++Fi19Z77I/TxtPMtdoQj/5gtS6Vt7USqvmWVhTrLd/VTYr8c06z0HsRxyhn+sTeYwAdOoSfc8kLd4V0J6gJxsQ1fHaN8cbucEvnU0bhNJm3jHdYH7pHVn3ANf0Ax5A33VLtM3FrbFCsvs31xH9QZddPyo237EZDIbz+bKcdBo9xIhy6QIiACIiACXZMAIj8W/lklNZGv/iSLkI63KwGJ/Ha1XM7zrUY55w+Aii8CIiACZQhI5JeBo1O5ISCRnxtTd62CSuR3LXuqNCIgAiLQTAIS+c2kqbTalYBEfrtaLuf5lsjP+QOg4ouACIhAGQIS+WXg6FRuCEjk58bUKqgIiIAIiIAIiIAIiEBeCEjk58XSKqcIiIAIiIAIiIAIiEBuCEjk58bUKqgIiIAIiIAIiIAIiEBeCEjk58XSKqcIiIAIiIAIiIAIiEBuCEjk58bUKqgIiIAIiIAIiIAIiEBeCEjk58XSKqcIiIAIiIAIiIAIiEBuCEjk58bUKqgIiIAIiIAIiIAIiEBeCEjk58XSKqcIiIAIiIAIiIAIiEBuCEjk58bUKqgIiIAIiIAIiIAIiEBeCEjk58XSKqcIiIAIiIAIiIAIiEBuCEjk58bUKqgIiIAIiIAIiIAIiEBeCEjk58XSKqcIiIAIiIAIiIAIiEBuCEjk58bUKqgIiIAIiIAIiIAIiEBeCEjk58XSKqcIiIAIiIAIiIAIiEBuCEjk58bUKqgIiIAIiIAIiIAIiEBeCEjk58XSKqcIiIAIiIAIiIAIiEBuCEjk58bUKqgIiIAIiIAIiIAIiEBeCEjk58XSKqcIiIAIiIAIiIAIiEBuCEjk58bUKqgIiIAIiIAIiIAIiEBeCEjk58XSKqcIiIAIiIAIiIAIiEBuCEjk58bUKqgIiIAIiIAIiIAIiEBeCEjk58XSKqcIiIAIiIAIiIAIiEBuCEjk58bUKqgIiIAIiIAIiIAIiEBeCEjk58XSKqcIiIAIiIAIiIAIiEBuCEjk58bUKqgIiIAIiIAIiIAIiEBeCEjk58XSKqcIiIAIiIAIiIAIiEBuCEjk58bUKqgIiIAIiIAIiIAIiEBeCEjk58XSKqcIiIAIiIAIiIAIiEBuCEjk58bUKqgIiIAIiIAIiIAIiEBeCEjk58XSKqcIiIAIiIAIiIAIiEBuCEjk58bUKqgIiIAIiIAIiIAIiEBeCEjk58XSKqcIiIAIiIAIiIAIiEBuCEjk58bUKqgIiIAIiIAIiIAIiEBeCEjk58XSKqcIiIAIiIAIiIAIiEBuCEjk58bUKqgIiIAIiIAIiIAIiEBeCEjk58XSKmfbEvjll1/c1q1b3eDBg91DDz3kunfv7mbMmOHOnz9fd5l+/vlnN2XKFJ8eaWb927t3b9E9rl696pYsWeIef/xxf03fvn3dqlWr3K1bt4ri6Uc2gTNnznj7YUe4Y1fse/fu3eyLajjz008/uU2bNrmnnnrKPfLII/4e2Bqbx+H+/fvu66+/djNnznS9evUqPAex3ePr9PtXAh1RN0O21dqHeIcPH3bPP/+8bx+sneD30aNHwyS130QCN27ccG+//bZ74okn3LBhw9wPP/yQmXot9Z46TLs6YMCAQp1kX21tJl6dyCAgkZ8BRodFoBUIICIWLFhQaOhDMY7APn78eF3ZrEfkM6h48sknk3mZOnWqo2NSKE/gyJEjrmfPnkmGS5cubVjoHzp0yPXv378k/ZTIZ2C2aNEi9/DDD5fEl8gvb0fOdlTdtDvXYp/333+/MKAL2wgT+wcOHLBktW0CAcT8H//4x8KACs7lRH4t9f7atWvu2WefLamTZtdZs2b5Z68JxVASOSAgkZ8DI6uI7Utg586dXoSNGjXKffvttw6PHd6j+fPn+06gIx
p8GwCMGDHCXblyxcPDyzx37lyfl4ULF/o8kBfyNHz4cH9827Zt7Qv6AeQcu40fP96LsXXr1vnZj3v37rn9+/c7Bmx49hsRY3gKEfjMsvzpT39yFy9edKSfCthuxYoV/hkaO3as++KLL5Ke/tS1OvYrgY6sm7XYh8H1pEmT/DO0b9++wkCR42vWrPF1c/ny5TJbkwhgm2XLlvm606dPHz9rxmxclsivtd7v2bPHp00bj+AnUI9x6NDW9uvXz509e7ZJpVEyXZ2ARH4ZC+Op+eijj9yYMWMKXhKbMgu9ll999ZXr1q2bmzdvXqGBDZNlKp5RONswpKbvPvnkk5KO+cSJE977t3HjRu8tpXOmI8cDN3HixJJlGzQINPbh1C1T8SyzYLlFHCgn0/vh1KB5DdjGXsDUkg2uJx2F5hEwsY3nlyUVYaDxR/h3RINv4oXnzYI9g5MnTy7x2NvzzxKiO3fu2CXaRgTwjlNn8Z7HS3OsjahXjJnwYKCASKgUaHsYWGgGphKp9PmOrpu12OfmzZtu3Lhx3vt7/fr1ogwzs/PYY4+51atXFx3Xj8YIMBhnII0OwKuPwM8S+bXWe9pf+t0dO3YUZZI24+WXX3YDBw70A/iik/ohAhkEJPIzwHAYkROK3XAfr6Z11FT0CRMmuCFDhrhLly4VpYjwnT59esk5hFHWtP0bb7xRSJvETGAxnc+AI8wH+xyzET/xaVTiOPYbTyKeBQuUgXTtfGobivzTp0+7oUOHJuNPmzZN3kAD24Qtnlga9Fg8Yz/EoK3pbubSCvM6MYAInynreLZv314oGcLy1KlTfqCJeM3q5AoX5HwHoYUz4ODBgwUSDMgRDPCm7oV1rRCpih3E3ejRo0uelaxLsWOcl6y4Ol5KoKPrZq322bx5sx9A8h7G559/7m7fvu12797t24+RI0e6y5cvlxZCR5pCoJLIr7Xef/fdd7494H0atACzqegKlm0++uij3iFH26sgAtUQkMgvQwnvWjjtTcWy5QmxB9U8cYihMJhAZ3rPKibiCWGOJ+2zzz7zgp5zeG+ee+4516NHD3fs2LFCMpYGIoDpQe6BOP/xxx/dCy+8UNJZM/1Pg3Dy5MnCYIFGAi9s3LFzH+7HlD2CjXzYNC8NCp2HBQYsLA9BXL777ruFly1phF599VXfyezatcuia9sgAbO7eXexC8s87KVXG5CFHvcGb+lYcoNgj2eduEf47Jw7d87Nnj3bx7V8MGglzwppAixzsnaDesbsDINu48e23oESdb13795u5cqVheUDpIdQYLYvfvly8eLF/oVf2h8GkTZgpE3Sy31p+4VHO7pu1mofBou0yWHbQD2eM2dOcvY2LIv2GyNQSeTXU+/pr9ECYdtA35+a6W8s97q6qxOQyK/DwlTaWNDY9GrsdUUcMV3KizcW8OQhmGIhxXniEX/Lli0WveDJf+aZZ0pmCohHQ1CNN9dmJsK45vWP84JwZ50hZbXAOkBESjhgsXPm2aJzUmgOARMSCHvsQyNvwg3hz3Quv5sl8m0JUDwzRGm4B888XkKWm9hXW2zQySAvrhPNodB1UqEuIeLx3DPrhQjDfnhfWWLDuup6Rb49K6SX+oeID9f7k5dUPDvWEe96dB1L/nN2taPqZq32YdDI0hyeJbMhW37X+3J+V7JXR5alGpFfa73HoUMbb+0stmSfQbyWxXakNbte2hL5ZWzK1w1Ya84XRaxDtgY0FjR41lmTb546krVlPCzlYd+CCXNLK7UNxbJ14KHgtrRSW/Ly8ccfF71LEN4jFPk24GBNJ95Z8+Sz7p8yh2s5mSGIOYTpss8aX6aKFRonYHY3xjTyCGx7r8IGaOGAsJG7IuSxb+oFWhsgWl4Q9ww8rMPh2cSTjEdZIU0gFm50/J9++ql/B8fWePNiXbhMKp1S6VF7VrDLX//6V2drs2l37OXL8J0h8oKtGWx88803fsYvfLkvnk0svWO+jxhvqw/Nrpu12odBIgM57I+398
KFC96Lj42ZnZHQ77jntRqRb88J20r1njpLP4rtmInBgcbsPbbl+mZ8havjaCjlViMgkZ9hETpdOsCwcob7scgnGXsB0bzithTGftutYsEUpmv79Yp8BD7r+Cyd1DYU+cT/j//4j2T8eC2nicpUmnZMIt+s3PjWZk5o7FkawyAsDOvXr/cdAYOvRgMzN3xNJ+XFJ23WCGNjlgPgvQwHrezjha5XoDaa93a5Hs8cDOmsP/zww8IAifyzDpd3euqtPyY6eSbiYC9mhuv9EZHcj/vGwd6/CNuJOE7ef3d03azFPiztwJb0SeEL+gza+IY7z1w8w5x3+zWz/JVEfq31fu3atd5m8Qv6DNxoY8Nlk80sh9LqmgQk8jPsastmqFR4QWgwLdAAp0Q+YgevPS/a4uHEC556GdfEcrx+39KPt9aBc99KwZbN4L0JP6fGdTa4CDtvvHjE5cU/+7oOQg6vn3mM7Z6Wj9C7b+e0bT4Be55Sz5q9IBvOHNWbA2ZvzNub9UzagDUUinY//ggPnl8JCSOS3vK+CgM2BvAMrsMAd84hCOoJNkjg6xtx2rYMK7QdgwGWBbLEIw44JRCGYTsRx8n7746um7XYx9rlVP2zwcjTTz9d9MGFvNuvmeWvJPJrrff081lC3gYMOAkURKAaAhL5GZRsaQoeSps+Z8vyHTxxKeFFUnSQnGP9K0tgUuvXzfPCN62ZrmfWoFywRrwakW8v4DE4oYEn0CGxfptpwrjzRvjz1/qYhaB8sUAI82UdG4MAymlLAsI42m8uARuYhS9GI/DtO/kpwUgO+JoGXnls9d577xVe+k7lDg/RoEGD/EugpJ0KtpwEIfrmm2+WfCefTqmaTzem0s7LMZstYVnFhg0bSr6Tn9WmMAjjBXiWhPAyXsr7Tr1lYB6mDVfqtD0reAgt2KAtfK5IA8cAg/6Uc8Ku1fZXAvXWTZxGtP30I1mzcLXYx9p86jDtuLXhtNfvvPOOF4ypAYDs2BwClUR+rfWeNp1++vXXXy9oD5yMfPSDL2hlDQCaUxql0tUISORnWNQqJpUt9S+rQzYBz/IKvKw01qnwl7/8peilmvAecdq1iHwTY2F68X7oofvggw+8BzGOw29evEXMW6dBObgW4ZiKz7Ew7VS5daw2AnTULOFI8Y6XU4UpmwDhutCDG8ZhHwFp719kefHtGsQJAjCVF60TNUrlt7Z2OmbI4AkHAvaIg4kIuyarjpWzD88Qz5IF6nTWp3MZKGjAZqSyt/XWTZw1Zsssx00t9ikXl/tQZ3k2FJpHgDpoNkxt4z68lnpfrh5zL7W1zbNjHlKSyC9jZZay8Pk5PGh0wixp4Y9j8aJjXIktGTpp+2t4tmzHzoVb4rHMIfyDVdZYxGnXIvK5BwMNBhl01qSJWEdA0NDgBQhFAp/hZIkRZeSb7HaN5YVt6AEkfb4kNHPmzKTYD9MOy6v9+gkgJvisoQlsBln8YTPEX1bAk8+fRq/kybevQsV/PyErXeLjFbTnJDUQzLpWx38dVFHv43aFupkS+DDjOG0OzLM8+cY2tg9L8PDm8hGBOLCkkEE8NqSekz6f2ZUgjEll/66nbsIX5uU8+dyxFvsg9OOPLfAHEFnXHb/Lk10anamWQK0i3/r7aus9y26xHTakbtI/MzOL/rCPHVSbV8XLNwGJ/Bzbn46Bl3QZVMTf0WZ6EM8ug4JynuAc41PRRUAEREAEREAERKBlCUjkt6xpOj5jtgwAkY9n4s6dO/6meB2+//57vyYQLwIehSwvY8fnUncQAREQAREQAREQARGolYBEfq3EulB8pv34ozcI+ax/Q4cOdadPn+5CpVZRREAEREAEREAERKDrE5DI7/o2LltC1pSyXp+/jMi6P8Q+7x/wByQBUKwAACAASURBVMDeeuutopf1yiakkyIgAiIgAiIgAiIgAi1DQCK/ZUyhjIiACIiACIiACIiACIhAcwhI5DeHo1IRAREQAREQAREQAREQgZYhIJHfMq
ZQRkRABERABERABERABESgOQQk8pvDUamIgAiIgAiIgAiIgAiIQMsQkMhvGVMoIyIgAiIgAiIgAiIgAiLQHAIS+c3hqFREQAREQAREQAREQAREoGUISOS3jCmUEREQAREQAREQAREQARFoDgGJ/OZwVCoiIAIiIAIiIAIiIAIi0DIEJPJbxhTtnZGNGzf6P6S1d+/e9i6Ici8CIiACIiACIiACXYCARH4XMGIrFEEiv+Os8Msvv7itW7e6wYMH+4FU9+7d3YwZM9z58+frvunPP//spkyZ4tPjrxxn/QsHbffv33eHDx92EydO9H8d2f4y8ieffOLu3btXd17yduGZM2e8/bAj3LEr9r17925DKMw+zz//vLO02fL76NGjJWnz165XrVrl+vbt6/Px+OOPuyVLlrirV6+WxNWBNIGOqJt2J7PPgAEDCvWTfWx269Yti1bYYjfshx15rrBrVtzCRdp5IARoHz/99FM3atQo/xfl+evyY8eOdYcOHXLU2ziY7VU3YzL6XSsBifxaiSl+koBEfhJLwwcREQsWLCh08qEYpwM4fvx4XfeoR+T/7W9/K4jHMB+I/U2bNiU7q7oy14UvOnLkiOvZs2fSnkuXLm1I6L///vt+8BXaxvYR+wcOHCiQPX36tBs6dGgyHwiR7777rhBXO2kCHVU3udu1a9fcs88+m7QPNp01a5bj/hYY8D/55JPJ+FOnTnWIRoXOIcDgncEW7aTVR9tSL/fs2VOUMdXNIhz60SABifwGAeryXwlI5HfMk7Bz507fOSC8vv32Wy+kb9y44ebPn+87jLizb0YubAAwYsQId+XKFZ/kpUuX3JAhQ1z//v3d/v37vRjFO8UgY/jw4d5riIdaIZsAdhs/frwX4uvWrfPeWBjCkwFbLMSzUyo9g4ibNGmST2ffvn2FwQLH16xZ45+h5cuXFy7k+Ny5c90HH3zg84E3kecLWyJAtm/fXoirnTSBjqybCD/sQD1H8BPC+tavXz939uxZfxwRiS0RkQsXLnQ8Z6E9Ob5t27Z0IXS0wwkcO3bM9ejRw3vxaS+xIzajnlLvadvNxmRGdbPDTZKrG0jkt4m5qfgrVqxwvXr1KvEG0BnQuIchXhLAdVxPOnGIp5wrTSXatLDlhfg0YuQjXN4R30e/ayNgYhvP79dff110MZ0CnUPY2RdFaOCHiRcGbhZMdLCsJA5fffWV69atm9uyZUt8Sr8DAtQNBNeiRYsKItxOw5X6EwpxO1fN9ubNm27cuHHe+3v9+vWiS1gS8Nhjj7nVq1cXHU/9wPap9iQVN8/HOrpumh127NhRhBlx+PLLL7uBAwe6ixcv+nMnTpzws0OTJ08uad+tbrK8786dO0Vp6ceDIbB+/XrfPmKLOFDvaRMY6FcK9kzEfX2l63Q+3wQk8tvA/ghzplzpfLP+hRUfQYZXMBV35MiR7vLly4VSl0ubxidehoEnAm9uKm2OSeQX0Da8QydOZx530HjqEINm42YyN29z7F3Cs4t96WjiYPkMn8E4jn47L7IZDB08eLCAA68ey2jgDV/ek0BA1hM2b97sBcNTTz3lPv/8c3f79m23e/du/wzF9T4rfRMSCBOFbAL2zHdU3WS5FM8EDpQ33njDz6gxm8bSvUcffbSoXTabhbMvePJPnTrl35+hHR82bJj74YcfsgukMx1GgLaaQTZL9eLAMc6FDpU4jv02O6tuGhFtqyEgkV8NpU6Os2vXLt95z54929HQE/DkMpWL0AtFHssrWGbBcRp9vDc0+BcuXHB4ehASf/7znwslsmU2nCMOcbmGa0mDF73OnTvn4+Pxnz59emG5gc0KEJ8lARL5BaxN2TEPnXl34c0yD3uxDt78q6aDqDZDTOsjCmKPPcIUgcpyjnDKmRc6p02b5q+RyC9PGT4280I9Y3aG5TtmR7aNiDEGDO+++27R84Et58yZU9XLtLbsg1k5lhgoZBN4EHWTtv65554rej769Onj4hfdqf/h4J
H2mr4C29uzxWwgeVZ48ATMQRL3sV988YV/+baaNlx188HbravcUSK/DSxJI473JnxxjmzbWr9Q5NloH9GNkAgDIn7QoEF+Wp/pfZvi5xjnwsC1JtxJk2Ad27x580qWG5AHGqtwwBGmp/3aCRhvhD2imw4exnj3EP5M5VfTQVR7Z1sCNGbMmKI1olzPAI/1/9wv659EfnnS8EHEU49tYARLPO/MvrGmvhGRT51laQ7phTbiNwOzcoFrmbWLvcTlrsnzuQdRNxnUU8+p72ZP9leuXFn00i1tLyKe2RuWgll82gva7ldffdWfl8jvnCeWdpU21WyY2oZ9eJxL1c2YiH7XQkAivxZanRTXPAH//u//7r7//nufC7b8psEwEc4JGovQqxNm2daRmpBg+pb9eMrZrkGwk741QPY7tfaaOMSVyDd6jW9NSFinQOdNJ26fOCxnj3rujg3x/mW9pMdn+8JPLjLTw7Pz3nvveRGxePHiem6bm2sQ+WZLttQ9PquHB97qJjMliIJ6gi3TM28vA3e8+Ni03JeYEBEMGLFno1/4qSff7XhNR9dNW0ZpMzEsD6Kdt4F+aCdre+3ZIg5OAfv6Ds9d79693cmTJ9sRdZfIM31t+HlTZmNpyxlYY+NUn0rBVTe7hPk7tRAS+Z2Kv7qb43GPp22tQY8/j0aDn7X+z4RELPJfeeWVEs88OTMRSZrhbwYdcbCORiI/JlP/b76ewfIOOgGm323ZlKXI2kzOVfPSll2TtbVlXikvftY1dtxe7ks9FxZHW+e9stRbRNiHH35YEGGwYQ02Xy+iPrOWvtZgXz+KX9JmAPH222/7wUVqMM95hAaOgddff70oT7XmIU/xO7purl271tssfkmbgRsDwdCRY04ghCOzfraMEnuwzwxRI4PHPNn1QZeVwVhoy/D+qpshDe3XS0Aiv15yD/A6hDPCnT+eYV+0yfpajjX4dBJxsOU69nIfX+EYPXq0Fxe21t+uwYNgn94z4W7rsuOvdJjXCQFjcS0dbesnANcJEyYkp9rtBVlb413/XX71Fpmtw1mhatI024fvblRzXR7j2Ls1zHiwxjYMcGfAZu9fhOeq2TfPckrImyB9+umn/ecVLT08vQh7luj813/9V0meLJ62pQQ6um7ifc8SfzwjtLUMFAm2bNPa9TC3/PE63rFIPRdhPO0/eAJ8AIMX4uN6SU5UNx+8PbrqHSXy28CytpaXjpypfEb4WYFPZzI1j1eHP45jL97ypQUGCXQO9lIlQn7ZsmX+GB5EBgEcY1nGhg0b/PR9+JUV8xaSPt5b8hGmK5GfZZX6j9sMCbaDNfZB4Nt38lOCkbvRgeCV5zlgOQ3XZQUb/PESKGlXCqTFc4hoZb034jT+ClOlNPJ43mZLWBZD/aKeUYfsO/lZL0fCmy/nsFyLGb3UH6piKQZLMni/hrppgwjE6DvvvOMFYyj0fvzxR7/cjzSxXbk2JY+2qqbM9dZN+0IZMzpZs3DUa9pTBmG2fAsb8bcMcMyEAwCboaUevvnmmyXfyScuS7kUOp8ANmS5pb1jRVsQ20Z1s/Pt1JVyIJHfBtbkazg0+PE/GnVEOI2EiTi2ts4vjs/vF198sWg617wJqbgIxLABIm2+tR/HJR4v43JcnvzmPlDmKY+Z87vcZxFNgBAv5eGzXJpNeZYqefHjNeWkzXXyAhvNylvqEx17bE84Zg2U7N0ZuyZVxxD1rNO2OPE2XpMfPh9xXPuduk/lEuYnRr11M6xH7KcCAwFsZraIt+GafK4vFz+Om7qfjnUcARuExTakHUDsW99tOVDdNBLaNoOARH4zKHZwGnha+RP0NAp8N92+nmCNBgIBr6oFGg3EBJ5cizt48OCil7EsLlt7KciWAnEfvH6pv2DKNCIvXyLsSZvPghEPQUB+JAxCss3ZR0yEL7zCnpe4sFtWYPD27LPPVvTk28xPNV58Eyc8bzxPeBm5Pu6ksvKk478ujWIJxcSJE339gWU8UI85wZ
cX86iXWZ58rkHof/zxx0X1njrNuu74fQ4JiZhyfb/rqZsIcupPOU8+ueFlW2xn7TLtLW36Rx99lHx3grpIu81zQltsbb7N6tRXQl3VKIFQ5GNDZj//53/+p/DXxOP0VTdjIvrdCAGJ/EboPYBraSD43B4vTiH2w0DjzYtWNOhZHqEwvvZFQAREQAREQAREQATyQUAiv8XtbC/U8UUc/niOeWVY23f+/Hn30ksveZHP6F9BBERABERABERABERABCAgkd/izwEvXTFFa0tzUlu+wFJu6UaLF1HZEwEREAEREAEREAERaDIBifwmA+2I5BDwvHzLGkvW8CL0bX0mX06xP3rSEfdWmiIgAiIgAiIgAiIgAu1HQCK//WymHIuACIiACIiACIiACIhAWQIS+WXx6KQIiIAIiIAIiIAIiIAItB8Bifz2s5lyLAIiIAIiIAIiIAIiIAJlCUjkl8WjkyIgAiIgAiIgAiIgAiLQfgQk8tvPZsqxCIiACIiACIiACIiACJQlIJFfFo9OioAIiIAIiIAIiIAIiED7EZDIbz+bKcciIAIiIAIiIAIiIAIiUJaARH5ZPDopAiIgAiIgAiIgAiIgAu1HQCK//WymHIuACIiACIiACIiACIhAWQIS+WXx6KQIdD4B/qLx1q1b/V885q8dd+/e3c2YMcOdP3++7sz9/PPPbsqUKf6vJ5Nm1r+9e/cW7rFx48bMeKRFmgqVCZw5c8bbDzvCnb9kjX3v3r1b+eKMGNgpy4Z23GxUj+0zbpv7w61SN80QP/30k9u0aZN76qmn/F9Fx/Zmd4ujbfMI3Lhxw7399tvuiSeecMOGDXP8dfqsUEu9v3fvnvv000/dqFGj/F+55y/cT5w40R09ejQreR0XgSQBifwkFh0UgdYggIhYsGBBUsD17dvXHT9+vK6M1iP0JPLrQl100ZEjR1zPnj2T9ly6dGndQl8ivwjzA/nRSnWTAh86dMj179+/5NmSyG/+44CY/+Mf/+gdLjaILifya6n39+/fd2+99ZYX95a2bXEM7Nmzp/kFUopdloBEfpc1rQrWFQjs3LnTN/Z4dL799ltHB4D3aP78+b4znzVrlkNsNDPYAGDEiBHuypUrhaQR+QjUEydOFI5pp3oC2G38+PHew7pu3Tp369Yth8du//79jgEbHfiBAweqT7DKmIiCbt26OexXKWTZvtJ1eTzfSnUTLzEC//HHH3d/+tOf3MWLF/2zlUe7dHSZaYOXLVvm298+ffr4mRNm47JEfq31/vDhw65Hjx7enl9++aW3I23FmjVrfF8wZswYd+3atY4uptLvIgQk8tvAkHTO/fr1cydPnixatoEw2Lx5c9L7F08N9urVy61YscIxnRuHeMqZqcGxY8d6zxANWhhCocc9Jk+e7EULAoX0aYwUmkPABBfC+uuvvy5KlEYe4c9zcfbs2aJzjf4w8RKLwtD2jd4jj9fjbX/44YfdokWLSuosy3Xw1i1fvrypaExgxAO2rJtk2T4rfl6Pt1LdNNEpL++DexoZjDOYoj/Fq4/AzxL5tdR7bEn7wKB89+7dRQWin54+fbo/d/DgwaJz+iECWQQk8rPItNBxxNVjjz3mfve735VMxSIatm3bVpRbPHc0+DbFF25HjhzpLl++XIhPIzV16tRkXNJmfWco9E3o/fd//7f3GoVps4/QD+MXbqSdmgngjRs4cKBfv33nzp3C9Qg3xKDZmE6kWcFEIQOI2Ftktpcnvz7aq1evLumg8eQjGOBN/Wn20goT7QwiKoVytq90bd7Ot1LdvH79uhs9enRJO5E3m3RWeSuJ/Frq/c2bN924ceO8PbGrBZxn9MWs/aedoC1WEIFqCEjkV0Opk+NQoanYiO4333zTiy/EwSeffOKFHo0CjQOB5RV47RCA27dvd4hDRPeFCxe81510/vznPxdKZGnjkScOcbmGa0ljwIAB7ty5cyXxSYelB6dOnfLXHDt2zC85oLMJG6fChdqpmQBiGi
++eXcZkLHMgyl5+Nu/Zjb4DBh5zlKi0J4Vuy/Px+9//3v30UcfNX3JUM2w2uCChQsXFmZeqGfMzlCHjCfbLG9gPcVjkMbUfmrAlkqvnO1T8fN8rJXqJjO8vXv3ditXriwsHeFZ0suaD+YJrSTya6n3lhYfVqAfxnv/4YcfOpYFhe0EaSqIQDUEJPKrodTJcRBXTN/99a9/LfKS8zWOV155xTfwNPQEPHc0Bqzfiz3qiPhBgwZ5TwGDAvMacIxzYeBa0iAt0rRgQu8///M/i4Td7du3/YxAM0WK3TOvWxMSCHtEtzX0dN4I/x07dnj7NEvk2xKgrDWfZvuws7H9F198MbkULK+2S5Wbjpn6ged+2rRpfjAFP76EwuzbpEmTmiryeWZSM32pvFWyfeqaPB9rpbppebG6GG8ZjHfEux55tn9YdhPmWX1fLfXe0uKdK76uQ5rYk3o8Z84c99lnn3nHj0R+aAHtlyMgkV+OToucQ1xlvfC4fv36onPEZUCQWrNn60itMbIGxbwGcXHtix2hiDSh18wlIvF99ftXAnHnjbhnvebVq1d9BLPPli1bmoIM21YrCrkhzxMdES/8cd2uXbuako+umggdcyjAqIfwY1bO6ubw4cNLlknVw8Nm9LIGbHGatdo+vj5vv1upblpecALgCLKZVGb+7GXNefPmlbwHkjebdVR5rR+1fjW+Ty313tKydoJ2FYcA778RzNaLFy+Ob6PfIpAkIJGfxNJaB+mAs0Q+6/3Cc8Rl/T6f7IqDCQlrjKxBYTYg9Y1uE5GkaYF9GiCJfCPScVteqOXFWhr62bNnFy2b4q4M8DjH11kaDbWKwvB+5INnInxOwvPa/5UAsy9wQowxBR9+Fem7775zQ4YM8bNhzIo1GrAFz0b8vk4q3UZsn0ovD8daqW6a8KMexsFma5v9rkd8nzz/tn7U+tWYRS313t6voJ1gKR9L+sIZeWb8ONcsx06cV/3uegQk8tvApnTYoZC3LOOpmTBhgn85kxfBCKylpxFYu3atRStsbbmONfjWoCAuLl26VIjHji3XQSiEgl4ivwhTh/4w+6Zsby9JNuPrOqGtw6VZ1RSOa1etWuWfOZ49hWwCzHRQn/DCxYNqe0HW3r/ITqXyGavniASek3KhEduXS7ern2ulumkDxJdffrnkubJlWNbmd3W7dEb5Kon8Wuo97QKzLszGI+jDwLm5c+dmztSHcbUvAkZAIt9ItPAWYY13HrFtX1nB+/baa6+ViAam9fi0Ji9nvv/++4UXb3lBls9iMgCwlyrp4O17v3xhB3HAMd7k37Bhg3/xNn5pTyL/wT4oxhvb2UvOCDf7Tn5KMJJDvqDEUg2eg/fee6/IGxSXoBZRGF6LgOB9Adb8pt7rCONqv/ileOoX9Sz8Tn5qMAc36iSfymW51nPPPecQdVmhVtFer+2z7p+n4/XWTf6AHUvcmNGpNAtXjX1MGFIP7bnCDtRPaydSTp882aojy1pJ5NtMWWifcvUecY/IHzp0qH+Xgri0Fbb0SgO2jrRm10tbIr8NbGqdCQI9/hd/EpNOnk9t4TGM4/I7fkESMUgaqbgIxNibYHkJvfttgLBts4jHMOsTp7Htw0KanbBruU6B54XPnvK8VPLix2tL7ZlBnO7bty+8vfYzCFCf6OyNnW3hH3+u1pIwEWFxy9U9BvkIyGq9+NXa3vKi7T8J1Fs3w3rEflaopW4ycMC5Y89IuKX9IK8KzSNAHQwZx/vxgL2Wes+gjb9+HafJb2xc7185b17plVI7EZDIbwNrhYLNKj6ftmSZRKrxpnOgUcGTi/ePa/iLfHjww3XAVnRExJIlSxx/MIu4iBBexrWXfSweW8tLOaERxtd+4wSwMba2TpzBF/bCblmBwduzzz5b0ZNvMz/ViMJQnCBKn3zySf/n1ystCcnKYx6PUzf5i5YTJ070dROOzJZRXzmXChxnDS71spwnn3jMzFUzYOM+tdg+lS8dc779rbVuItJojy
t58mu1D/Fpt20QSR/xzjvv6A8UdsCDWqvIr7Xe00/TX/OchH3y+fPnO6A0SrIrE5DIbwPrIqxjz0AbZFtZFAEREAEREAEREAER6CQCEvmdBL6W20rk10JLcUVABERABERABERABCTy2+AZkMhvAyMpiyIgAiIgAiIgAiLQQgQk8lvIGFlZkcjPIqPjIiACIiACIiACIiACKQIS+SkqOiYCIiACIiACIiACIiACbUxAIr+Njaesi4AIiIAIiIAIiIAIiECKgER+ioqOiYAIiIAIiIAIiIAIiEAbE5DIb2PjKesiIAIiIAIiIAIiIAIikCIgkZ+iomMiIAIiIAIiIAIiIAIi0MYEJPLb2HjKugiIgAiIgAiIgAiIgAikCEjkp6jomAiIgAiIgAiIgAiIgAi0MQGJ/DY2nrIuAiIgAiIgAiIgAiIgAikCEvkpKjomAiIgAiIgAiIgAiIgAm1MQCK/jY2nrIuACIiACIiACIiACIhAioBEfoqKjomACIiACIiACIiACIhAGxOQyG9j4ynrIiACIiACIiACIiACIpAiIJGfoqJjIiACIiACIiACIiACItDGBCTy29h4yroIiIAIiIAIiIAIiIAIpAhI5Keo6JgIiIAIiIAIiIAIiIAItDEBifw2Np6yLgIiIAIiIAIiIAIiIAIpAhL5KSo6JgIiIAIiIAIiIAIiIAJtTEAiv42Np6yLgAiIgAiIgAiIgAiIQIqARH6Kio6JgAiIgAiIgAiIgAiIQBsTkMhvY+Mp6yIgAiIgAiIgAiIgAiKQIiCRn6KiYyIgAiIgAiIgAiIgAiLQxgQk8tvYeMq6CIiACIiACIiACIiACKQISOSnqOiYCIiACIiACIiACIiACLQxAYn8Njaesi4CIiACIiACIiACIiACKQIS+SkqOiYCIiACIiACIiACIiACbUxAIr+Njaesi4AIiIAIiIAIiIAIiECKgER+ioqOiYAIiIAIiIAIiIAIiEAbE5DIb2PjKesiIAIiIAIiIAIiIAIikCIgkZ+iomMiIAIiIAIiIAIiIAIi0MYEJPLb2HjKugiIgAiIgAiIgAiIgAikCEjkp6jomAiIgAiIgAiIgAiIgAi0MQGJ/DY2nrIuAiIgAiIgAiIgAiIgAikCEvkpKjomAiIgAiIgAiIgAiIgAm1MQCK/jY2nrIuACIiACIiACIiACIhAioBEfoqKjomACIiACIiACIiACIhAGxOQyG9j4ynrIiACIiACnUfgl19+cVu3bnWDBw92Dz30kOvevbubMWOGO3/+fN2Z+vnnn92UKVN8eqSZ9W/v3r3Je9y9e9ctWrTIX9ezZ0934sSJZDwdrJ/AvXv33L59+9zEiRPdI4884lkPGDDArVq1yv3000/JhM+cOeOfDZ4RbMozw7ODveJA+p9++qkbNWqUe/jhh/09uNfRo0fjqPotAmUJSOSXxaOTIiACIiACIlBKAIG/YMGCpAjv27evO378eOlFVRxpVORv27bNPfroo/6fRH4VwOuIwsAJtqkB2LRp0xw2DMORI0cy4y9durRI6N+/f9+99dZbXtzH6TNA2LNnT5i09kWgLAGJ/LJ4dFIEREAEREAESgns3LnTCzG8rd9++61DnN24ccPNnz/fi79Zs2Y5BgLNDDYAGDFihLty5UpJ0gwsGGBw71deecULS3nySzA1fODkyZN+tuT06dMOrzv/YD98+PAS5jwT48eP9974devWuVu3bvn4+/fv97ZCuB84cKCQp8OHD7sePXq4/v37uy+//NLH5Zo1a9b4523MmDHu2rVrhfjaEYFyBHIn8s+dO+crZ69evXxDTAV7/vnnHRWLRjoONm1GxWJajqkz9g8dOpSMf/XqVbdkyRJn6T/++ONuxYoVRVN4P/zwgxs2bJhbuHBhfDu3cePGkkbCvAacYyqQ9EiXvDCFF08Nk2emEimXTQ2SH/JF/lKBdJlqZMoR7wHXvfrqq4X4sFm2bJm/565du0qSoDObPn26b7
SYllQQAREQga5KwMQ23tyvv/66qJgIMIR/v3793NmzZ4vONfrDBhb0BXGgDZ86daobOXKku3z5su9f5MmPKXXs75UrV7revXs7BgEWWFZFX80SqnhpDst16G+XL1/uo9PPEq9bt25u9+7dloTfWh/LuYMHDxad0w8RyCKQK5Fv4jqeAuM3I2eEfhiokEylpeIj0kkvDOZFScUPG2XLR60in7wwwIjTj0f2NCpxHPuNRwHPQhjoEOgYLE64DfN47NgxzwkPUdxY2UBk3rx5JefCe2lfBERABNqdwMWLF93AgQP9Gus7d+4UikPbimAz50rWuvnCBTXsmEeYAUTsyUUc4unlvnZP2m6J/BoANxCV/hDHGrMo8fKb1atXe9EeCnMccXjvsSX9Le9gMHC8efOmGzdunBs9erS7fv16IUd48jdt2uSeeOIJHz/UE4VI2hGBBIHcifw5c+a4L774orBmLpwGs9G0cWJtIyPwoUOHus8//9yLVyonU3R4tcOGln3ENvGp5DaVinflgw8+cO+//74l6wcH9XjyaQz69Onj8ObQqPz444/uhRdeKGlAmAZkrSjeBBPjly5dcpMnTy6Ji3eAqV3S5oUxvPB0GHRcrP1jetGCeRJoyGJv/dq1a30HE0472nXaioAIiEBXImBODeszaOdpK5lhDZ0kzRRj1h/h/Y0Dwh6Bv2HDhsIMs0R+TKm5v202x+zNexDMhtNPhgE72KwOfSszPzjb7Dq25jQ0ByB9MX0waX344Ye+3w/jh8638F7aF4GYQK5Eflx4+20NdlhxrAKnpmPtunBLI5s1JRfGY98qhE2+1wAAIABJREFUcng/i0OnEHtfLH/PPPOMQ6yHYcuWLb6xMO9NeC7eJ20aijCupc0AgI6qUmCpDuUMOxrzPrBcJ27gKqWn8yIgAiLQbgSs3UTY0xbifKFtZUknwn/Hjh3+d7NEvi0Bimdt4WYzsSzVCdtw+pe4L2k3zq2cX9MIofg2DYDz0AJ2QMTjAOOlXOJwzVNPPeUdaZMmTSoR+bzXwdd1uI64XIOD8rPPPvM2TWkHu5+2IhASyJ3IZ0nNzJkzSzwuVKSw4pgQp+G8fft2yCy5T2NORcSLXilY2uH97JpyIj8V364Lt3jvP/74Yz+zYJ/3ChuiUOSzzzkGC9UE62wmTJhQ6FC++uor70VKrdWvJk3FEQEREIF2ImAi39pV2lnWUts7T7W2q5XKbv0L3vww0NZzX17SjGdX6S8k8kNaHbePHb755puCh56ZbQvYwZ4Ttgh3BDyrAmygwAu79K2mDSw+moKBgdnWnrvFixdb8tqKQFkCuRL5NLy2VtIqUbgNRbRVNps2K0vROf/CbLUvxFja4f0s/UZFPo3NG2+8UdSohGVkPyXyt2/fblmouKUB4x0G1ugz/cjSpdQ60YoJKYIIiIAItCEBXqhlCQYibPbs2Y4POoRh/fr1VTt9wutS+yz95Gs6KS++9SVxG5/6Td+i0LEE7F0NW2PP3ZjZwR7M9rD0Jpzt/u6779yQIUP8C9M4E1mHz3p84rOkh6U99LEWWELLuWqdcnadtvklkBuRj/jlhVEaZV5QCqc1bXQcim5bgjJo0CB34cKFik8I6+SpfKQdVsrUhdYwx7MEHGfZTOx9SeUvlS7HrJFh3TwvAlFuCzTy5DEU+Xy/97HHHnNz584timvXpLZ4FUifxovlQzRSoecidY2OiYAIiEBXIUD/wWxm3FZTPntB1tZhN1Jm+hL7dCJ9TBysL6Fdr/RPIj+m1/zfsWjnDrbEFe972B9zzr6WZO92cJ6PV+AwjL+Hzzn66Wqdic0vnVJsRwK5EfmMkhHVVBAqFlNlVBr+gpytkwtFPo0rn6qk4Rw7dqyPR3yOI/o5x/SaBYQvU6bMFLBO087x8gz3C1+8tdE6HYR9B/fUqVP+Ptwv7jhqEfm8bMsnvJj+s8+30SGxRtTW94Uin3yGLwwj2ikjZWUZTvjirZWVczREeJd40Q
uRX81AyK7XVgREQATanYA5TegfaL9pNxH49p38lKijzCwZpa/As1tpeSftKo6m1FfRKvGjP4v7kkrX6Hx9BOgTmc1h3Twv4IbLqmwmxl6MZr0++sO+kx/bCHGPTuGDH6zjJ274gZBwlqC+3OqqPBHIjcjHqNYoZ3k8QpFPfF5oQgCn4iOY8aJYoIHfvHmzf/EqFT/0ohDXBhBhXNZ1prxDtYh8W+MXphvvhyKf/NOoxF+FsGtiJlZeBgA0RMRjTShlUhABERCBvBDAeYLjyNrKcGvfqk+xoE21uFntK9dZP8Hsc8qLn0o7PEbasYAMz2u/fgJZWgJbsVwW0R8G+tjUUmHi82nMsP/k2qxPdzODziBRQQSqJZArkc9aODzTVBQaWYQtApUlK6yDSzW4NOThH4lCiPMHqFJ/PIuKyvHwj1BxL64nnTDw+/XXX/cVn4rOYII/sIUAjxvmWkQ+98AbzzpRa1QGDx7sGxLzEMQin2uYiQhfSIZNpT+exYAEoY/gVxABERCBvBGw/iHsU2g3QwdQzASRRptcyZNvyyLr8eJzT4n8mHzzfscin/6Sr8vRh+N5j4NpA7QDGoI+n/fY6JNDgW/XoVX4ahPPCVqFvpz3A+M/fGnxtRWBLAK5EvlZEHS8dgIMJFgSpKnD2tnpChEQAREQAREQARHoaAIS+R1NuIulj5cCTxReCLwLqVmBLlZkFUcEREAEREAEREAE2o6ARH7bmaxzMhyv9U+tJeycnOmuIiACIiACIiACIiACMQGJ/JiIficJmMi3tYT2xzySkXVQBERABERABERABESgUwlI5Hcqft1cBERABERABERABERABJpPQCK/+UyVogiIgAiIgAiIgAiIgAh0KgGJ/E7Fr5uLgAiIgAiIgAiIgAiIQPMJSOQ3n6lSFAEREAEREAEREAEREIFOJSCR36n4dXMREAEREAEREAEREAERaD4BifzmM1WKIiACIiACIiACIiACItCpBCTyOxW/bi4CIiACIiACIiACIiACzScgkd98pkpRBERABERABERABERABDqVgER+p+LXzUVABERABERABERABESg+QQk8pvPVCmKQFMJ/PLLL27r1q1u8ODB7qGHHnLdu3d3M2bMcOfPn6/7PvYXjEmv3L+9e/cm73H37l23aNEif23Pnj3diRMnkvF0sJTAmTNnvP2wI+yxK/aFab0BO5WzI+emTJnisDvhhx9+cMOGDcu8Jsvu9eavq17XKnVz48aNmbYM7d5V7fCgy3Xv3j23b98+N3HiRPfII4949gMGDHCrVq1yP/30UzI7tdR70uevyo8aNcrxV+a5B/c6evRoMm0dFIEsAhL5WWR0XARagAAiYsGCBckOvG/fvu748eN15bJRkb9t2zb36KOP+n8S+dWb4MiRIw5eKUG+dOnSuoW+RH71NmhWzFaqmxL5zbJqdeng1Miqx9OmTSsMpi21Wur9/fv33VtvveXFfdxO4BjYs2ePJautCFQkIJFfEZEiiEDnEdi5c6dv7PHofPvtt44O4MaNG27+/PleKM6aNcshNpoZbAAwYsQId+XKlZKkGVgwwODer7zyiu/s5MkvwVRyALuNHz/ee+XWrVvnbt265fDY7d+/3/OkAz9w4EDJdY0eQBR069bNIQQtmCd/4cKFdkjbGgm0Ut3Ethps12jABqKfPHnSz2SePn3a12HqMe3i8OHDS+xQa70/fPiw69Gjh+vfv7/78ssvffq0FWvWrPF9wZgxY9y1a9cayL0uzROBthX5JkTopOgk+/Tp4x5//HE/yqWyMQXO7+3btxfZ89y5c75y9urVy4skOtbnn3/eUbEQUHGwaTMqFlNmTJ2xf+jQoaL4NrKnsWW6bsWKFf7+xGeaLV5awb24Zzzdt2nTpoZFG3lmKpFy2ZIAyrtkyRJ39erVQhGZPkSsTZgwITnF+NVXX3lxsGzZsqKyct3kyZMLacfeBk31FxA3tGPPOJ33119/XZQWjTzCv1
+/fu7s2bNF5xr9YeIlFIWWJs/21KlT3ciRI93ly5cd9U/iwuiU31IvaA9Y5hQvzWG5DvVo+fLl5ROp8awJjHjAJpFfI8goeqvVTYn8yECd9HPlypWud+/ejkGAhVrqPbqA9oFB+e7duy0Jv8WZM336dH/u4MGDRef0QwSyCLS9yKfzGjRoUGH6++mnn3ZPPvlk4ffo0aPd9evXffmtY4tFKb8ZOSO6w0BHzBR6Kj7rWUnPgol84jMIiK8JR99UZMQ8HX4cj9+IqKx1fXa/cttyU/d4Eun4CZRv3rx5vuzHjh0rSpI8Iu7hEp4zL24q33ZMIr8IZd0/Ll686AYOHOjXb9+5c6eQDvZDDNoArpm8TRQygIi9RTwTeJO4r91TIr9gloo7q1evLumgGZDjvYc39afZ66dtwMYgIgzWFsqTH1Kpfr/V6qZEfvW264iY9KU41nCaxcvuaqn3N2/edOPGjXOhbiG/ePLRDE888YRvJ1IOmI4ol9JsfwJtL/LpGKkUNLqTJk3yFQAv44ULF9zLL79cNKqmY5szZ4774osvCmvmwmmw2IvGumOE+NChQ93nn3/uRTGdMlN0COBQBJnIJz/MKtC5UvF//PFH98ILLxR17haXBoGGgXika9N93PODDz6o++liZoN13HgTSJtw6dIl733HQxB6AcxbT0MUhu+//94PlvAc2HIQ0mJQgMjbsGFDYbmB5Ruh8t1334XJaL8BAvac2HPJwI9lHsxQ2YCKbTMbfHvmY1FIMRD2ZnsEP0Eiv3oDw8pmXuDH7AyD7tCWsfOg+tRLY9I+4VxIDdhM5If3/s1vfuO9iMx2KpQn0Gp1kzYgtCX19Pe//7376KOPCu13+RLpbK0EbDbHuPOOEi/eWn9p6dVS761e8mEFHDuk9eGHH3pNYfdhq8G50dW2EoG2F/mIVnsRhQcfgYzAJvC7mqUE1mCHFccqcGqpRAqqpfHMM894QR3G2bJli2+AzfuJoA7zGca19XisdTaBHp5vZN86AssHadmyj9hzQJw4j8YE0fCPf/yjKCuIz2pYF12kH2UJ2DMFW0Q3g0caeJaNIfx37NjhfzdL5NuzEM46WQZZmsPgOZ5lqraOWTp53sIKEY/nnpfzqF/Y86mnnvJtGE6KZop8nhnuwcAtDiYmQuFg+7bsMb5Gv/9JoJXqJrmytt1sGG5ffPHFhmaG/1lq7YUErD8MWVPfWG6D89BCLfXe6iXvXPF1HdoD0iddHJSfffaZ72dDrWL30VYEUgTaXuSHnaJVJioKgd+x8MTrPHPmzBJvKBUprDhW2RA1t2/fTrErOmaNfphGUYTgB3HMoxcc9rt230am7RkcfPzxx96LZ5/3ChuiUORz07Vr1xbNNJjHfsiQIUUDFjwLeBjwEjFw4TczEH//+9/9bAdLS5hRUWgOAXumzHbYkg7E3qvAjpzDFs0ICIWUKOR54L68CMb7GGFI1bHwvPb/SQBWZku2tF105NQhEwy8uBfOEP7z6tr2eGGapYypAVsqJfJA3X3ttdf8M5D1nk7q2jwea5W6mcWe54lnizpLnd61a1dWVB1vAgHayG+++aYwM0efaqGWem/9v7UT2A6HgLW79twtXrzYktdWBMoSyJXIRxQhUK0CxVsqowWrbDZtZseztlb5wjSy4hInSxDbfesV+TQ2b7zxRmYZKXMs8u0FXHvBlqU9CHz7HZaD9fm2LjDkR2PEmkFbxhFeo/36CPBCLYNB2M6ePdvFyyjWr1/vz7E8q9FQThTaMxnaO2u/WbMKjZanFa9n9gVuzMgwBR9O67PMjTpXrVOhUvmyBmyVrrN8hM6TStfk8Xyr1M1K7GkjeOZULyuRas55e1cj7L9rqfe8P8isOjZjKR9L+sI+lVULnGuWY6c5pVYqrUwgNyIf8csSGAQTLw+GL7amBLq9AMNLvazvrxRSaWRdQ6UnHynvii3XwXMaVu6stOLj1siE6/0tDg09DUQs8mHDWntbu8typ/iFW9
KwAQSzI3gIGTBRDl50/uSTT7xH0u6lbeMEeEbxqMazUaRsL8hmzQjVcneeM/s8my11C6+XyA9p1L9Pfae+4IWjLoUB7pyjbWg00F7RboUv2VebJtf+9re/LXnxr9rr8xKvVepmOd7Ua9aI0+bHX5krd53O1U/ABsnhYL2Wem99cbgM2XLDublz5xbNuts5bUUgi0BuRD5Lbqh4VB46VKanqTT8BTlbHxt64Wkg+QwmDeTYsWN9POJznI6Qc+G0ei0i3152ZSoVLyzpkh/W6vKSL3kkTj2Bl235hBfT/vZpRTok1m/b+r5Y5HMf7odox1PAYCh84dbyYWKPAQGNGQMhhY4lYAMznsFTp0755w+Bb9/JTwlGcsQaegZirK9+7733yg4YGxGF1JnUIKRjqbRn6jZbQj0LX1ynDWBQnsWRNmfz5s3+XYznnnuu7MvtxC03YMsiZ20hzxltHmmQlkI2gXrrJktGafuZ0ak0C1dv3aRv4l0enrVqHVXZJdWZSgSoP8y0sm6eF3DD92Bqrff2dy3QAmgCtEH4gZBwlqBSvnReBHIj8jG1Ncp0Yql/ocgnvgmlVNx4OrsWkU+DkPVpTrx5LLchTj3B1vam8mzHUiLfPFN8CYilRKlZBsQlnyi1dMItnQlLm+K/B1BPGXTNPwlgFwanIWvbt2/V/zP2P/fCZ71cp4CQY8DKc5fy4v8zxfSeRH6aS9ZROnDqitnQtvDPWu5mg2uLm6q/dj+W3iEgK3nxrb2yNMPtH/7wh6KZTktb22IC9dZN6ozxjvuc8A611M0wTUubLQNHvuCm0FwCYfsa8s7qv2up9+X0Ac4ABokKIlAtgVyJfNbA4t2golAx8XKyLIY/Oc06uFSDS0POlOeAAQP8Nbz8yB+wYllN6OmyTjOVRsoYVGS+fsFSFxoG/rFcxl7ES11T7THW1LOG28QEfxgMAWEegiyRQH7gYst2UvfjhV48FfxxLT65FzZw7Mcv66bS0LHaCNgzGD63/GEzxF9WYID67LPPVvTk2/sYlURh1n143rM80FnX5Pk4bQZth/0RPKv31M2wPQkZcZw1uNTncp584vEeDWlWGrBZe2X1l7aQ2Tv+yB+eQ4XqCNRTNxFptMmVPPm11M1Q5GN/+pW33nqr8DdRqiuNYlVLIBb5lepPrfUerUJ/zHNCHZUTrVrLKF5MoG1FflwQ/e54Anw9AKFJhxK+NMidmR62v1OQNYjo+BzqDiIgAiIgAiIgAiIgAhCQyNdzUDUB817w8g9/LMs8j3xKk7WlrP2UV7dqnIooAiIgAiIgAiIgAh1GQCK/w9B2vYR5CciWANk0f7hlmpilTfW+T9D1iKlEIiACIiACIiACItA5BCTyO4d7W94Vzz3f7eWPibEm3wS+rUeM31Noy0Iq0yIgAiIgAiIgAiLQBQhI5HcBI6oIIiACIiACIiACIiACIhASkMgPaWhfBERABERABERABERABLoAAYn8LmBEFUEEREAEREAEREAEREAEQgIS+SEN7YuACIiACIiACIiACIhAFyAgkd8FjKgiiIAIiIAIiIAIiIAIiEBIQCI/pKF9ERABERABERABERABEegCBCTyu4ARVQQREAEREAEREAEREAERCAm0rMj/+eef3UsvveT4BvuePXvCPGtfBERABERABERABERABESgDIGWFfknTpxwPXv29H9waeHChWWKUP2pjRs3+jRJu1UDZaXcrZzHVmWnfImACIiACIiACIiACPxKoGVFfkd48usR+ZcuXXL/9m//5pYuXfpAnhmJ/AeCua1u8ssvv7itW7e6wYMH+0Fv9+7d3YwZM9z58+frLgf1a8qUKYW/Wmx/vTje7t27N3mPu3fvukWLFvnrNShNIso8eObMGW8/7Ahv7Ip9YVpvwE6x7eLf2Bu7pwJ/ybrZTpXUfbraMdXNrmbR6spz7949t2/fPjdx4kT3yCOP+Lo3YMAAt2rVKvfTTz8lE6ml3pP+p59+6kaNGu
Uefvhhfw/udfTo0WTaOigCWQRaVuRnZbiR4/WIfJtRaNZsQqX8S+RXIpSv84iIBQsWJAVc37593fHjx+sC0qjI37Ztm3v00Uf9P4n86k1w5MiRgpiORTiOhHqFfiMi/9q1a27MmDHelgiKB9XWVU+tNWOqbramXR5ErkwXxHWY39OmTSsZTNdS7+/fv+/eeustL+7j9HEMaPnyg7Bw17mHRH4FW1plflAdn0R+BYPk7PTOnTt9Y49H59tvv3V0ADdu3HDz58/3wn/WrFkOsdHMYAOAESNGuCtXrpQkzcCCAQb3fuWVV7S8rIRQ+gB2Gz9+vPfKrVu3zt26dcvhsdu/f7/nSQd+4MCB9MUNHEUUdOvWzeHkiAODCgYX3Pudd97xtnxQbV2cl3b7rbrZbhZrXn5PnjzpZzJPnz7t6zD1mHZx+PDhJe1hrfX+8OHDrkePHq5///7uyy+/9OnTVqxZs8b3BQzIGZgriEA1BGoS+RcvXnQDBw4s6+kxUbx8+fLC/VNTT5MnT3ZMX8Uh5ZHKWjLAtYieQ4cOubFjxxamzcLRb+hlNE8+lfGTTz5xv/3tb71QYvvhhx/6tEjzhx9+cMOGDUt6T8O043ylpm5fffVVd/Xq1biY/jflnzlzpn+5mHTpaP/1X/+1pJFIXlzmILyZSnz++ed9mqTdq1cvt2TJkqK8cH/E2oQJE5JTjF999ZUXB8uWLSuw4bZch/3Ib8jD9mMuZbKqU2UImNjmGWY5RRho5BH+/fr1c2fPng1PNbxv4iUlCpmKnjp1qhs5cqS7fPmybwvCOtbwzbtwAtQLPOUsc4o99izXof6E7WYzUJjAyBqwMQB47LHH3KZNm7xIwZYS+ZXJq25WZpTHGCtXrnS9e/d2DAIs1FLv0TO0DwzKd+/ebUn4Lfpi+vTp/tzBgweLzumHCGQRqEnkX79+3Y0ePdp38rdv306mycPHA7p9+3Z/ns6MdWp0biYCbYtIjAUhv+28beM44Y3ppLLEJteHAgTRQof29NNPl+SHNMyLVo/Ip9GfO3duSd7JA0L6m2++CbPtp9z4cpCVMdyGeS66qMofKYaWPp5EOn4Ctpk3b573Ghw7dqwodRobxD0ehfCceXEtvdS2nL2KbqIfZQnYoJr193fu3CnExX6IQXvum8nbRCEDiNhbxDOBNymst5p5Kpil4s7q1atLOmgG5LQ78KYulVs3X/EGiQg2YGMQEYcLFy64QYMGFZYXmINGIj8mVfpbdbOUSZ6P0JfiWKOvj5fd1VLvb9686caNG+d1FnrLAp58BuJPPPGEbydSDhiLq60IhARqEvkIe7x4PIQ8jAQe4CFDhjheUCUguhH0TEET6MAQBc8995z3ACMUqBCfffZZWS8y15pYzRIxeBXxQiOK6cxIN6xs8do4KoaJUpY7IGiIz9Q5xxcvXuzzHP5XbcfHGmXKjefeljhQMd99911ffsQ09yKYFxaR//777xcEHAOFP/zhD0UDkzAv1e7DnnXceBPsntgH7zsDsNALYN567BiG77//3j355JPec2DLQUiLcmDPDRs2FJYb2DQlQuW7774Lk9F+AwTs2TPvLs87z2o8OGxmg2/PcUoUUg/N9tRjgkR+9QaGlc28wI/ZGQbd1iaxZQYRJ0MzAu0MU/upARttDe0jywus7bbnTSK/Mn1jpbpZmVVXjWGzOVZ/eUcJh6b1l1buWuq9ORjNsUNarDLo06dPUTuhOmp0ta1EoCaRT2IIYeuI7CFH3JoQR3CEnmgaQUa3qaU5CMt4aivMcCWRbxXixRdfLKpYeMcQ25ZPS5O8kdc33nijIH45x3IHOt9UxbHGPHXO0sXLSqVMdaYIY9Yt05maZ5RykY+1a9daEoUt9wn5FU40YccGOWYrkrQBBzM0oefA8sjgyYLZm3L+4x//sMN+i/jsqHwX3ShHP+zZgy2i2xp6vuZAvdqxY4dv+Jsl8u1ZSK35ZGkOS3QY5Idfj+jI57WrmRpWtEk4Ph
DYtAEIhKeeeso7RyZNmlTSZjXCgGeGezBwCwMDDLyCzGrilLFgz1u5ts7i5n1rrFQ38/skWH9oIp8t9Y3lNjj4LNRS703T4ITk6zq0F5bunDlzvHOUflZ11OhqW4lAzSI/FPGI42eeecZ/RooHm84D8WHi2jz/YSWI96kU5vWPM1tJ5NvyIR56OisENf/Y51g440DaYd7De1nFSlUca8xT5ywNxBEiPi5b+DsczJCPrHJzn0bFMgw+/vhj78Wzz3uFeQlFPmVgsBF6+Lkej304Q0M8G8zgzd2yZYv/zYDq73//uxs6dKh/X4NpbIXmELBnz2yHLaln9o6H1Q9s0Yxgz2UsCnkeuC8vgsWD9WY8r83IezukASuzJVvaSTpy6pAJhtAZ0EiZmE1kHX5qwGafy1yxYkXRuzb2vJVr6xrJU1e61liZPVU3u5J1ay8LbSRLcm1mLnTg1VLvTYvYc4VOwCFg7a49d6lVB7XnWlfkgUDNIh9hYYIQLy8ebDyKeIIRuzzQtq7UOi57YFPbLLELfBMxsSgNDbNr1y7/6bc4bYRo6KXimo4S+XHFjPPC71jkG8OwLOzDrxGRT2PDTEUqD3Ys5kkDwmyLvWDL9D0C336HeWR9vq0LtPTYYke8g7aMI7xG+/URsBkm2M6ePdudO3euKKH169dnDhaLIlbxo5worOb5tmeBOqaQJoADBE7MyDAFH07rs8yNOsdMSdb7TulU00ezBmzE5pzZq9zWnDXpO+T7qOpmvu2fVXp7V8M0EPFqqffmuKReMmBgQB72qWgazjXLsZNVDh3vOgRqFvk2kkTg492jw+DBpkP4/PPPvcBnqskeTEQrX+Spx8NbSeQziGCUy1IblpDgTeEfX9rhizuWBzMXeU0JaBMx5DUOVt7UOYtrg5l45sDOx1teSqaihkthiGNLIlJ5jNPI+m2NDKKdF4EQ/Rasc49FPnHw3NtyI/IVv3BLGjaAIH94CBlIIUBZu8/XivBIKjSPQPjOCc9hGOwFWVvjHZ6rdZ96Yp9ni59J0rL6UU4Q2jmeMYU0ARwS1Be8cGG9JDbcOYcgaDTYC7XhS/ZhmtYOmM2ythL5IbXifdXNYh769SuB1GC9lnpvfTFOwNhJyTk+7pHlIJQNRCBFoGaRbw8xHRViGs8uDx/rzvHqM0UcvsRpDzgvfYYvgqYyEx+rJPJNgCNQECKI7XKhHpFvHhtEM8uKsoQs03N00q+99pof0GTFI38wQ0QzNX/q1CmfJut0WfJCh9uIyIcxswakTd4JdEjMttBpk34s8onDC7g2+4Et+VRX6Gkkjok9BgQ8B/bytb+J/usQAibIqGs8KwhyBL59Jz8lGMkIA0YGYryk+95775UMeMPMVhKFYdx4v9GZpzi9rvzbZkuoZ+GL67QrtC9Z9R6bb9682Tsw+IABdS8rVBqwZV3HcWtPyzk0yl2ft3P11k0+VMDSN2Z0spaqGkvVTSPR2ls0EDOtrJvnBdxwyWOt9R5xj5BHD6AL0BLhd/LDWYLWpqLctQKBmkU+gpEXxBCLoefavNMct89nUkCE90svveTjcy7+Fz6w5hGP44S/aVgtUKn4U9LhedtH3PBdeISphXpEPkKXP/pj6YbbUCzT8fJpzvB8uB92nFlp0vCzLCOrs7dylNtWwzDMt6VlnqkXXnjBz7wwOIsD4jKrjAgXBnnnz5+PL9PvBghgF5ZwhM+S7du36lPJmwAhbljH4riIQtZmM0BNefHj+PFvifyYSPnfdOAWTemYAAAFVElEQVTUFbOhbeGftdzNBtcWN1V/7a4svaMdyfLiW7zUViI/RSX7WL11kzpjtgz7hfhOqpsxkdb5HbavZku21OP4wx7kupZ6z4CBz3CG6do+zgAGiQoiUC2BmkW+ee156MKpZfN4p6aSELV86YFlHVQCe2DZhgKkGoEainwaQTzopPOb3/zG/8GnMG32wz/0VI/IBySdLAMG/qBUmH7c2dLo8wmt1MAjbsx//P
FH742lw+ef/dGsrDxWa1DisaaewYKJicGDB3sBYR6CON+WNjaifLZsx46HW17oxVMBC5iHPNiPX9YNr9V+fQTsuaKBh3FqABunjCf/2WefrejJt/cx6hGF3FMiPyZf/jdtFn/RcuLEid4zT3tIfaNuci4VOM4aXOpzOU8+8XiPhjTrGbBJ5Kfolz9WT91EpNEmV/Lkq26WZ9+ZZ2ORT5vM7DfLhFOz+LXWe9NMPCe0+dR9OdE60+Lte++aRX4rFRWxysP/9ttvl1QsBh0sWWnEK95KZW2FvPD1AIQmwo5GKAy8dG0zPFmDiDC+9kVABERABERABERABDqOQFuLfMQmo1y85+E33pkR+N///V+/7p3ZA/6wk0LjBMx7wcs/MDXPI5/WZG0pfz1Tg6rGOSsFERABERABERABEWiUQFuLfPvrnAj91D+8/H/7298aZaTr/z8B++vFKdYcY5kAAy6WdCmIgAiIgAiIgAiIgAh0HoG2FvmsfeMzkSwTYU2ciU/Wi7PGnTWNCs0jgOee7/bOnDmz6P0EW4/IWmPz7jfvrkpJBERABERABERABESgVgJtLfJrLazii4AIiIAIiIAIiIAIiEAeCEjk58HKKqMIiIAIiIAIiIAIiECuCEjk58rcKqwIiIAIiIAIiIAIiEAeCEjk58HKKqMIiIAIiIAIiIAIiECuCEjk58rcKqwIiIAIiIAIiIAIiEAeCEjk58HKKqMIiIAIiIAIiIAIiECuCEjk58rcKqwIiIAIiIAIiIAIiEAeCEjk58HKKqMIiIAIiIAIiIAIiECuCEjk58rcKqwIiIAIiIAIiIAIiEAeCEjk58HKKqMIiIAIiIAIiIAIiECuCEjk58rcKqwIiIAIiIAIiIAIiEAeCEjk58HKKqMIiIAIiIAIiIAIiECuCEjk58rcKqwIiIAIiIAIiIAIiEAeCEjk58HKKqMIiIAIiIAIiIAIiECuCEjk58rcKqwIiIAIiIAIiIAIiEAeCEjk58HKKqMIiIAIiIAIiIAIiECuCEjk58rcKqwIiIAIiIAIiIAIiEAeCEjk58HKKqMIiIAIiIAIiIAIiECuCEjk58rcKqwIiIAIiIAIiIAIiEAeCEjk58HKKqMIiIAIiIAIiIAIiECuCEjk58rcKqwIiIAIiIAIiIAIiEAeCEjk58HKKqMIiIAIiIAIiIAIiECuCEjk58rcKqwIiIAIiIAIiIAIiEAeCEjk58HKKqMIiIAIiIAIiIAIiECuCEjk58rcKqwIiIAIiIAIiIAIiEAeCEjk58HKKqMIiIAIiIAIiIAIiECuCEjk58rcKqwIiIAIiIAIiIAIiEAeCEjk58HKKqMIiIAIiIAIiIAIiECuCEjk58rcKqwIiIAIiIAIiIAIiEAeCEjk58HKKqMIiIAIiIAIiIAIiECuCEjk58rcKqwIiIAIiIAIiIAIiEAeCEjk58HKKqMIiIAIiIAIiIAIiECuCEjk58rcKqwIiIAIiIAIiIAIiEAeCEjk58HKKqMIiIAIiIAIiIAIiECuCEjk58rcKqwIiIAIiIAIiIAIiEAeCEjk58HKKqMIiIAIiIAIiIAIiECuCPw/Zvos7Cj2ajQAAAAASUVORK5CYII=) 1. Install Java 8 and NLU ###Code import os from sklearn.metrics import classification_report ! apt-get update -qq > /dev/null # Install java ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"] ! pip install pyspark==2.4.7 ! 
pip install nlu > /dev/null import nlu ###Output Collecting pyspark==2.4.7 [?25l Downloading https://files.pythonhosted.org/packages/e2/06/29f80e5a464033432eedf89924e7aa6ebbc47ce4dcd956853a73627f2c07/pyspark-2.4.7.tar.gz (217.9MB)  |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 217.9MB 67kB/s [?25hCollecting py4j==0.10.7 [?25l Downloading https://files.pythonhosted.org/packages/e3/53/c737818eb9a7dc32a7cd4f1396e787bd94200c3997c72c1dbe028587bd76/py4j-0.10.7-py2.py3-none-any.whl (197kB)  |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 204kB 17.7MB/s [?25hBuilding wheels for collected packages: pyspark Building wheel for pyspark (setup.py) ... [?25l[?25hdone Created wheel for pyspark: filename=pyspark-2.4.7-py2.py3-none-any.whl size=218279465 sha256=90dbb9e58f0f2c2d84f268ee8dd9f2f6334927c1a225a2bb63a9208d3133b1ae Stored in directory: /root/.cache/pip/wheels/34/1f/2e/1e7460f80acf26b08dbb8c53d7ff9e07146f2a68dd5c732be5 Successfully built pyspark Installing collected packages: py4j, pyspark Successfully installed py4j-0.10.7 pyspark-2.4.7 ###Markdown 2. Download Amazon Unlocked mobile phones dataset https://www.kaggle.com/PromptCloudHQ/amazon-reviews-unlocked-mobile-phonesdataset with unlocked mobile phone reviews in 5 review classes ###Code ! wget http://ckl-it.de/wp-content/uploads/2021/01/Amazon_Unlocked_Mobile.csv import pandas as pd test_path = '/content/Amazon_Unlocked_Mobile.csv' train_df = pd.read_csv(test_path,sep=",") cols = ["y","text"] train_df = train_df[cols] from sklearn.model_selection import train_test_split train_df, test_df = train_test_split(train_df, test_size=0.2) train_df ###Output _____no_output_____ ###Markdown 3. Train Deep Learning Classifier using nlu.load('train.classifier')You dataset label column should be named 'y' and the feature column with text data should be named 'text' ###Code # load a trainable pipeline by specifying the train. 
prefix and fit it on a datset with label and text columns # Since there are no trainable_pipe = nlu.load('train.classifier') fitted_pipe = trainable_pipe.fit(train_df.iloc[:50] ) # predict with the trainable pipeline on dataset and get predictions preds = fitted_pipe.predict(train_df.iloc[:50] ) preds ###Output tfhub_use download started this may take some time. Approximate size to download 923.7 MB [OK!] ###Markdown 4. Test the fitted pipe on new example ###Code fitted_pipe.predict("It worked perfectly .") ###Output _____no_output_____ ###Markdown 5. Configure pipe training parameters ###Code trainable_pipe.print_info() ###Output The following parameters are configurable for this NLU pipeline (You can copy paste the examples) : >>> pipe['classifier_dl'] has settable params: pipe['classifier_dl'].setMaxEpochs(3) | Info: Maximum number of epochs to train | Currently set to : 3 pipe['classifier_dl'].setLr(0.005) | Info: Learning Rate | Currently set to : 0.005 pipe['classifier_dl'].setBatchSize(64) | Info: Batch size | Currently set to : 64 pipe['classifier_dl'].setDropout(0.5) | Info: Dropout coefficient | Currently set to : 0.5 pipe['classifier_dl'].setEnableOutputLogs(True) | Info: Whether to use stdout in addition to Spark logs. | Currently set to : True >>> pipe['default_tokenizer'] has settable params: pipe['default_tokenizer'].setTargetPattern('\S+') | Info: pattern to grab from text as token candidates. 
Defaults \S+ | Currently set to : \S+ pipe['default_tokenizer'].setContextChars(['.', ',', ';', ':', '!', '?', '*', '-', '(', ')', '"', "'"]) | Info: character list used to separate from token boundaries | Currently set to : ['.', ',', ';', ':', '!', '?', '*', '-', '(', ')', '"', "'"] pipe['default_tokenizer'].setCaseSensitiveExceptions(True) | Info: Whether to care for case sensitiveness in exceptions | Currently set to : True pipe['default_tokenizer'].setMinLength(0) | Info: Set the minimum allowed legth for each token | Currently set to : 0 pipe['default_tokenizer'].setMaxLength(99999) | Info: Set the maximum allowed legth for each token | Currently set to : 99999 >>> pipe['sentence_detector'] has settable params: pipe['sentence_detector'].setUseAbbreviations(True) | Info: whether to apply abbreviations at sentence detection | Currently set to : True pipe['sentence_detector'].setDetectLists(True) | Info: whether detect lists during sentence detection | Currently set to : True pipe['sentence_detector'].setUseCustomBoundsOnly(False) | Info: Only utilize custom bounds in sentence detection | Currently set to : False pipe['sentence_detector'].setCustomBounds([]) | Info: characters used to explicitly mark sentence bounds | Currently set to : [] pipe['sentence_detector'].setExplodeSentences(False) | Info: whether to explode each sentence into a different row, for better parallelization. Defaults to false. | Currently set to : False pipe['sentence_detector'].setMinLength(0) | Info: Set the minimum allowed length for each sentence. | Currently set to : 0 pipe['sentence_detector'].setMaxLength(99999) | Info: Set the maximum allowed length for each sentence | Currently set to : 99999 >>> pipe['default_name'] has settable params: pipe['default_name'].setDimension(512) | Info: Number of embedding dimensions | Currently set to : 512 pipe['default_name'].setLoadSP(False) | Info: Whether to load SentencePiece ops file which is required only by multi-lingual models. 
This is not changeable after it's set with a pretrained model nor it is compatible with Windows. | Currently set to : False pipe['default_name'].setStorageRef('tfhub_use') | Info: unique reference name for identification | Currently set to : tfhub_use >>> pipe['document_assembler'] has settable params: pipe['document_assembler'].setCleanupMode('shrink') | Info: possible values: disabled, inplace, inplace_full, shrink, shrink_full, each, each_full, delete_full | Currently set to : shrink ###Markdown 6. Retrain with new parameters ###Code # Train longer! trainable_pipe['classifier_dl'].setMaxEpochs(5) fitted_pipe = trainable_pipe.fit(train_df.iloc[:100]) # predict with the trainable pipeline on dataset and get predictions preds = fitted_pipe.predict(train_df.iloc[:100],output_level='document') #sentence detector that is part of the pipe generates sone NaNs. lets drop them first preds.dropna(inplace=True) print(classification_report(preds['y'], preds['category'])) preds ###Output precision recall f1-score support average 0.43 0.90 0.58 29 good 0.80 0.84 0.82 38 poor 0.00 0.00 0.00 33 accuracy 0.58 100 macro avg 0.41 0.58 0.47 100 weighted avg 0.43 0.58 0.48 100 ###Markdown 7. Try training with different Embeddings ###Code # We can use nlu.print_components(action='embed_sentence') to see every possibler sentence embedding we could use. Lets use bert! 
nlu.print_components(action='embed_sentence') from sklearn.metrics import classification_report trainable_pipe = nlu.load('en.embed_sentence.small_bert_L12_768 train.classifier') # We need to train longer and user smaller LR for NON-USE based sentence embeddings usually # We could tune the hyperparameters further with hyperparameter tuning methods like gridsearch # Also longer training gives more accuracy trainable_pipe['classifier_dl'].setMaxEpochs(90) trainable_pipe['classifier_dl'].setLr(0.0005) fitted_pipe = trainable_pipe.fit(train_df) # predict with the trainable pipeline on dataset and get predictions preds = fitted_pipe.predict(train_df,output_level='document') #sentence detector that is part of the pipe generates sone NaNs. lets drop them first preds.dropna(inplace=True) print(classification_report(preds['y'], preds['category'])) #preds ###Output sent_small_bert_L12_768 download started this may take some time. Approximate size to download 392.9 MB [OK!] precision recall f1-score support average 0.73 0.66 0.69 392 good 0.84 0.87 0.86 408 poor 0.79 0.83 0.81 400 accuracy 0.79 1200 macro avg 0.79 0.79 0.79 1200 weighted avg 0.79 0.79 0.79 1200 ###Markdown 7.1 evaluate on Test Data ###Code preds = fitted_pipe.predict(test_df,output_level='document') #sentence detector that is part of the pipe generates sone NaNs. lets drop them first preds.dropna(inplace=True) print(classification_report(preds['y'], preds['category'])) ###Output precision recall f1-score support average 0.70 0.66 0.68 108 good 0.79 0.82 0.80 92 poor 0.75 0.77 0.76 100 accuracy 0.74 300 macro avg 0.74 0.75 0.75 300 weighted avg 0.74 0.74 0.74 300 ###Markdown 8. Lets save the model ###Code stored_model_path = './models/classifier_dl_trained' fitted_pipe.save(stored_model_path) ###Output Stored model in ./models/classifier_dl_trained ###Markdown 9. Lets load the model from HDD.This makes Offlien NLU usage possible! 
You need to call nlu.load(path=path_to_the_pipe) to load a model/pipeline from disk. ###Code hdd_pipe = nlu.load(path=stored_model_path) preds = hdd_pipe.predict('It worked perfectly.') preds hdd_pipe.print_info() ###Output The following parameters are configurable for this NLU pipeline (You can copy paste the examples) : >>> pipe['document_assembler'] has settable params: pipe['document_assembler'].setCleanupMode('shrink') | Info: possible values: disabled, inplace, inplace_full, shrink, shrink_full, each, each_full, delete_full | Currently set to : shrink >>> pipe['sentence_detector'] has settable params: pipe['sentence_detector'].setCustomBounds([]) | Info: characters used to explicitly mark sentence bounds | Currently set to : [] pipe['sentence_detector'].setDetectLists(True) | Info: whether detect lists during sentence detection | Currently set to : True pipe['sentence_detector'].setExplodeSentences(False) | Info: whether to explode each sentence into a different row, for better parallelization. Defaults to false. | Currently set to : False pipe['sentence_detector'].setMaxLength(99999) | Info: Set the maximum allowed length for each sentence | Currently set to : 99999 pipe['sentence_detector'].setMinLength(0) | Info: Set the minimum allowed length for each sentence. | Currently set to : 0 pipe['sentence_detector'].setUseAbbreviations(True) | Info: whether to apply abbreviations at sentence detection | Currently set to : True pipe['sentence_detector'].setUseCustomBoundsOnly(False) | Info: Only utilize custom bounds in sentence detection | Currently set to : False >>> pipe['regex_tokenizer'] has settable params: pipe['regex_tokenizer'].setCaseSensitiveExceptions(True) | Info: Whether to care for case sensitiveness in exceptions | Currently set to : True pipe['regex_tokenizer'].setTargetPattern('\S+') | Info: pattern to grab from text as token candidates. 
Defaults \S+ | Currently set to : \S+ pipe['regex_tokenizer'].setMaxLength(99999) | Info: Set the maximum allowed length for each token | Currently set to : 99999 pipe['regex_tokenizer'].setMinLength(0) | Info: Set the minimum allowed length for each token | Currently set to : 0 >>> pipe['glove'] has settable params: pipe['glove'].setBatchSize(32) | Info: Batch size. Large values allows faster processing but requires more memory. | Currently set to : 32 pipe['glove'].setCaseSensitive(False) | Info: whether to ignore case in tokens for embeddings matching | Currently set to : False pipe['glove'].setDimension(768) | Info: Number of embedding dimensions | Currently set to : 768 pipe['glove'].setMaxSentenceLength(128) | Info: Max sentence length to process | Currently set to : 128 pipe['glove'].setIsLong(False) | Info: Use Long type instead of Int type for inputs buffer - Some Bert models require Long instead of Int. | Currently set to : False pipe['glove'].setStorageRef('sent_small_bert_L12_768') | Info: unique reference name for identification | Currently set to : sent_small_bert_L12_768 >>> pipe['classifier_dl'] has settable params: pipe['classifier_dl'].setClasses(['average', 'poor', 'good']) | Info: get the tags used to trained this NerDLModel | Currently set to : ['average', 'poor', 'good'] pipe['classifier_dl'].setStorageRef('sent_small_bert_L12_768') | Info: unique reference name for identification | Currently set to : sent_small_bert_L12_768
.ipynb_checkpoints/Project Document-checkpoint.ipynb
###Markdown Data intelligence Application Project IntroductionThe goal of the project is to find the best joint bidding and pricing strategy to attract more users to an e-commerce website. In this way we can sell more items and also try to build loyalty among the users after their first purchase. First of all we define the scenario, the product we want to sell and the idea behind the choices we make Product to sellThe product that we want to sell is an anti-wrinkle cream.We chose this product because nowadays people in general take more care of their appearance and, more specifically, of their skin. A cream is a product for everyone, easy to sell and to advertise, that can be manufactured at a low price and sold at a higher price, if well sponsored.A cream is also a product that can be bought with a fixed frequency (monthly in our case) and, if a person thinks that the product is good, he is going to keep buying it over time User Features and ClassesWe have identified 3 classes of potential buyers characterized by 2 principal features Features* *Gender*: * Male * Female* *Age*: * Young, before 30y/o * Adult, after 30y/o Classes:| | Female | Male ||-------|:----------:|--------------:|| **Young** | x | || **Adult** | x | x | * **Male-Adult(C1)**: This is the least interested class in buying the product, but we think that there are many potential buyers; they are not willing to pay much for a cream* **Female-Adult(C2)**: This is the most interested class; they are willing to spend much for the product, first of all because they have the economic availability and because a cheaper product may be interpreted as a bad-quality product* **Female-Young(C3)**: This class does not have high economic capabilities, so the price needs to be lower than the price for Class 2, also because they usually don't care about the quality Environment Conversion RateFor the conversion rate we have defined 10 points for each class and, using a quintic regression, we have estimated a function for each one ###Code import
matplotlib.pyplot as plt import numpy as np import math terms1 = [ 2.7201569788151064e-001, 1.0492858071043640e-001, -7.7219072733113589e-003, -7.8440033424782515e-005, 1.5070734117821789e-005, -2.6951793857949948e-007 ] def regressC1(x): t = 1 r = 0 for c in terms1: r += c * t t *= x return r terms2 = [ 2.8737573993069027e-001, 5.9510040146659365e-002, -3.3835350515498425e-003, 3.3437236649835021e-004, -2.1899899222428644e-005, 4.0670620526356083e-007 ] def regressC2(x): t = 1 r = 0 for c in terms2: r += c * t t *= x return r terms3 = [ 3.3159910846623597e-001, 6.2264260733061391e-002, -1.0506786208669700e-003, -5.9692898283301293e-004, 3.3636918621988781e-005, -5.0878883129873533e-007 ] def regressC3(x): t = 1 r = 0 for c in terms3: r += c * t t *= x return r prices=np.intc([2 ,5,7,9, 10,12, 15, 20, 25, 30 ]) fig = plt.figure() plt.plot(prices,regressC1(prices), label="Male-Adult") plt.plot(prices,regressC2(prices), label="Female-Adult") plt.plot(prices,regressC3(prices), label="Female-Young") plt.title("Conversion Rates") plt.xlabel("Price (โ‚ฌ)") plt.ylabel("Conversion Rate") plt.legend() plt.show() ###Output _____no_output_____ ###Markdown Cost Per ClickThe cost of a single click depend on the bid, we tought that a reasonable random variabile that follow a uniform distribution with a=bid-0.05*bid and b=bid+0.05*bid ###Code def costPerClick(bid): s= np.random.uniform(bid-0.05*bid, bid+0.05*bid,1) return(s) ###Output _____no_output_____ ###Markdown Number of clickAlso the number of click depend on the bid, and we tought that a reasonable function that simulate the increasing behavior can be the tanh function. 
We cap the maximum daily click at 500 per day.The randomness is added trought a random variable that follow a uniform distribution which support is$x\in[(1-0.05)*500*tanh(bid),(1+0.05)*500*tanh(bid)]$ ###Code def nrDailyClick(bid): meanNrClick = math.trunc(500*math.tanh(bid)) nrClick= math.trunc(np.random.uniform(meanNrClick-0.05*meanNrClick, meanNrClick+0.05*meanNrClick,1)[0]) return nrClick bids= np.linspace(0,3.0, 100) fig = plt.figure() for bid in bids: plt.scatter(bid,nrDailyClick(bid), color="blue") plt.title("Nr click function") plt.xlabel("Bid (โ‚ฌ)") plt.ylabel("Nr Daily Click") plt.show() ###Output _____no_output_____ ###Markdown Distribution probability over the number of times the user will come backThe probability that the user will come back to the e-commerce website to buy that item by 30 days after the first purchase is simulated trhought this function$p(month)=\frac{month}{month+2}$ where $x$ is the month we are referring.This comes by the fact that the first time the user buy the product we have a 0.33 probability that the user come back, due the fact that we don't know if he like the product and during this period he may find another product with better price.More the user buy the product in a row more is the probability that he will buy next time. 
This because he is fidelized and less prone to change it with other product independently on the price ###Code
def nextTimeProbability(month):
    # Probability that a customer who has bought for `month` consecutive
    # months comes back the following month: month / (month + 2).
    # Starts at 1/3 for month=1 and increases monotonically toward 1.
    return(month/(2+month))

# Visual sanity check: plot the come-back probability over one year.
months=np.linspace(1,12,12)
fig = plt.figure()
for month in months:
    plt.scatter(month,nextTimeProbability(month), color="blue")
plt.title("Next time probability")
plt.xlabel("Month")
plt.ylabel("Probability")
plt.show() ###Output _____no_output_____ ###Markdown Step 1 The goal of the first step is to formulate the objective function, with the assumptions that once a user makes a purchase of an item that costs a certain price, then the ecommerce will propose the same price to future visits of the same user and this user will buy the product for sure. * $i$ is the class* $j$ is the price of the product* $p_{ij}$ is the probability that the class will buy the product at a given price $j$ (conversion rate)* $c$ is the fixed cost* $x$ is the bid* $v(x)$ is the stochastic cost per click depending on the bid* $t$ is the time frame we consider.* $n(x)$ is the stochastic number of click depending on the bid x.* $\varphi$ is the number of times a customer will buy the product againThe goal is to maximize the profit, which yields the following model\begin{equation*} max \sum_{0}^{T} \sum_{i}^{3}(\varphi \cdot p_{ij}(j-c)-v(x))*n_{i,t}(x)\end{equation*}The joint pricing/bidding algorithm is then as followingFor every class $i \in I$For every possible bid $x \in X$For possible price $p \in P$\begin{equation*} j,x = arg max(\varphi \cdot p_{ij}(j-c)-v(x))n_{i,t}(x)\end{equation*}the complexity is in $O(|I|*|X|*|P|)$ Step 2In the step 2 we need to consider the online learning version of the above optimization problem when the parameters are not know.The random variables wich we don't know a priori are:* Number of daily click* Cost per click* Conversion RateThe model for each one of the random variable are explained in the Enviroment.
Potential delay in the feedbackThe potential delay in the feedback is given by the fact that once a user click on the ad he may need many days to decide wether complete the purchase or not, so we need to take track of the sigle user using for example cookies. In this way we can use this data to estimate also e number of user that didn't make the purchase we click the ad and use this information futurely to update estimation. Step 3In the third step we need to consider the case in which the bid is fixed and try to learn in online fashion the best pricing strategy when there is no distinction among the classes. We also assume that the number of daily click and the daily cost per click are known.To do that we first of all make an average of the three different conversion rate of the three classes.We fix the bid and the cost per click as 1.0 while the number of daily click is set to 380.Every day we pull an arm that represent a price. The experiment lasts 365 days.For every click, we play a round and we get a reward that can be 0(user didn't buy the product) or 1(the user buy have bought the product). If it is 0 we are losing money cause we have to pay the click, otherwise we increment the daily reward.The cumulative reward is calculated as follow: ###Code cumRewardTS+=reward*(prezzi[pulled_armTS]/max_prezzo) cumRewardUCB1+=reward*(prezzi[pulled_armTS]/max_prezzo) ###Output _____no_output_____ ###Markdown This is done because we need to normalize the reward to give more importance to an higher price with respect a lower price. We cannot simply consider the reward itself because the lower prices provide more cumulative rewards in absolute but maybe provide less revenue. Is better to sold 1 cream at 20 euro thant sold 5 cream at 3 euro. At the end of the day we update the distributions associated to the arms. 
###Code
# End-of-day posterior updates: each learner receives the mean per-click
# reward observed today for the arm it pulled.
ts_learner.update(pulled_armTS, cumRewardTS / nrClick)
ucb1_learner.update(pulled_armUCB1, cumRewardUCB1 / nrClick) ###Output _____no_output_____ ###Markdown As we expected the final revenue provided by TS is higher than the revenue provided by UCB1. We also have the theoretical guarantee that the regret goes to 0 faster with TS ![alt text](Step3revenue.png "Title") Step 4In the 4th step we introduce class differentiation.To do that we have defined a single TS learner. Every day we pull an arm, i.e. the price that is presented to every user independently of the class he belongs to.We assume that the clicks are equally distributed among the three classes.In this step, when a user sees the ad he makes a choice according to the specific class probability; this is the big change with respect to step 3. For experimental purposes we also simulate the fact that we can discriminate at the advertising level, meaning that users belonging to different classes see different prices. To do this we define three different learners, one for each class, assuming that the number of daily clicks is equally distributed ###Code
# NOTE(review): the accumulators cumRewardTSC1..3, cumRewardSingle, dailyTS
# and dailyTSSingle are assumed to be (re)initialised outside this cell as
# intended -- confirm in the initialization code, otherwise the per-day
# averages passed to update() grow without bound across days.
for t in range(0, T):
    # Pull one TS arm per learner: one per class, plus the single
    # non-discriminating learner that serves all classes the same price.
    pulled_armC1 = ts_learnerClass1.pull_arm()
    pulled_armC2 = ts_learnerClass2.pull_arm()
    # BUGFIX: class 3 must pull from its own learner
    # (was ts_learnerClass2.pull_arm(), a copy-paste slip -- note that
    # ts_learnerClass3 is the learner updated with pulled_armC3 below).
    pulled_armC3 = ts_learnerClass3.pull_arm()
    pulled_armSingle = ts_learnerSingle.pull_arm()
    # A new arm is pulled every day; the three classes can be distinguished
    # and each of them is proposed a different price.

    # Class 1
    for x in range(0, nrClickPerClass):
        # Reward for the class-specific arm
        reward = envClass1.round(pulled_armC1)
        # accumulate the price-normalised reward
        cumRewardTSC1 += reward * (prezzi[pulled_armC1] / max_prezzo)
        dailyTS += reward * prezzi[pulled_armC1] - costPerClick
        # same click replayed against the single (non-discriminating) learner
        reward = envClass1.round(pulled_armSingle)
        dailyTSSingle += reward * prezzi[pulled_armSingle] - costPerClick
        cumRewardSingle += reward * (prezzi[pulled_armSingle] / max_prezzo)

    # Class 2
    for x in range(0, nrClickPerClass):
        reward = envClass2.round(pulled_armC2)
        cumRewardTSC2 += reward * (prezzi[pulled_armC2] / max_prezzo)
        dailyTS += reward * prezzi[pulled_armC2] - costPerClick
        # single learner
        reward = envClass2.round(pulled_armSingle)
        dailyTSSingle += reward * prezzi[pulled_armSingle] - costPerClick
        cumRewardSingle += reward * (prezzi[pulled_armSingle] / max_prezzo)

    # Class 3
    for x in range(0, nrClickPerClass):
        reward = envClass3.round(pulled_armC3)
        cumRewardTSC3 += reward * (prezzi[pulled_armC3] / max_prezzo)
        dailyTS += reward * prezzi[pulled_armC3] - costPerClick
        # single learner
        reward = envClass3.round(pulled_armSingle)
        dailyTSSingle += reward * prezzi[pulled_armSingle] - costPerClick
        cumRewardSingle += reward * (prezzi[pulled_armSingle] / max_prezzo)

    # Record today's revenues and update each learner with its mean
    # per-click (price-normalised) reward.
    totalRevenueTS.append(dailyTS)
    totalRevenueSingle.append(dailyTSSingle)
    ts_learnerClass1.update(pulled_armC1, cumRewardTSC1 / nrClickPerClass)
    ts_learnerClass2.update(pulled_armC2, cumRewardTSC2 / nrClickPerClass)
    ts_learnerClass3.update(pulled_armC3, cumRewardTSC3 / nrClickPerClass)
    ts_learnerSingle.update(pulled_armSingle, cumRewardSingle / (3 * nrClickPerClass)) ###Output _____no_output_____
01 - Statistics & Probability/Statistics Notes/ipynb/01 Statistics Visualizing Information.ipynb
###Markdown Visualizing Information ###Code # importing required libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline plt.style.use('fivethirtyeight') ###Output _____no_output_____ ###Markdown Analysing "Data Flick's solutions private limited"importing data ###Code no_units = pd.read_excel('./data/df_genre_units.xlsx') # reading data no_units = no_units.set_index('Genre') no_units plt.figure(figsize=(6,6)) no_units['Units sold'].plot.pie(autopct='%.0f%%') plt.show() ###Output _____no_output_____ ###Markdown **Visualizing statisfied customers** ###Code satisfied = pd.read_excel('./data/statistics/df_statisfied.xlsx') # reading data satisfied = satisfied.set_index('Genre') satisfied plt.figure(figsize=(6,6)) satisfied['Satisfied'].plot.pie(autopct='%.0f%%') ###Output _____no_output_____ ###Markdown **Bar plot **> vertical plot ###Code plt.figure(figsize=(10,5)) satisfied['Satisfied'].plot.bar() plt.ylabel('Statified customers in %') plt.show() ###Output _____no_output_____ ###Markdown > horizontal plot ###Code plt.figure(figsize=(10,5)) satisfied['Satisfied'].plot.barh() plt.xlabel('Statified customers in %') plt.show() satisfied['unsatisfied'] = 100-satisfied['Satisfied'] satisfied ###Output _____no_output_____ ###Markdown Split Category bar plot> Vertical ###Code plt.figure(figsize=(10,5)) satisfied.plot.bar() plt.ylabel('Statified customers in %') plt.show() ###Output _____no_output_____ ###Markdown > Horizontal ###Code plt.figure(figsize=(15,7)) satisfied.plot.barh() plt.xlabel('Statified customers in %') plt.show() ###Output _____no_output_____ ###Markdown **Split Category bar plot**> Vertical ###Code plt.figure(figsize=(10,5)) satisfied.plot.bar(stacked = True) plt.ylabel('Statified customers in %') plt.show() plt.figure(figsize=(10,5)) satisfied.plot.barh(stacked = True) plt.xlabel('Statified customers in %') plt.show() ###Output _____no_output_____ ###Markdown **Execrise** The CEO needs another chart for the 
keynote presentation. Hereโ€™s the data; see if you can sketch the bar chart ![image.png](attachment:image.png) ###Code sales_unit = pd.read_excel('./data/statistics/df_salesunit.xlsx') sales = pd.read_excel('./data/statistics/df_sales.xlsx') sales_unit = sales_unit.set_index('Continent ') sales_unit sales = sales.set_index('Genre') sales ###Output _____no_output_____ ###Markdown **Charts** ###Code sales_unit['Sales (unit)'].plot.pie() plt.title('Pie chart') # bar chart sales_unit.plot.bar() plt.title('bar chart') # bar chart sales_unit.plot.barh() plt.title('horizontal bar chart') #plt.show() ###Output _____no_output_____ ###Markdown **Sales** ###Code sales.keys() %matplotlib inline # bar chart sales.plot.bar() plt.title('bar chart') # bar chart sales.plot.barh() plt.title('horizontal bar chart') #plt.show() ###Output _____no_output_____ ###Markdown **Excerise - 2** ###Code score = pd.read_excel('./data/statistics/df_scores.xlsx') score.keys() score = score.set_index('Scores ') score ###Output _____no_output_____ ###Markdown **Charts** ###Code score['Frequency'].plot.pie() plt.title('Pie chart') # bar chart score.plot.bar() plt.title('bar chart') # bar chart score.plot.barh() plt.title('horizontal bar chart') #plt.show() ###Output _____no_output_____
MLCCNumpy_Exercises.ipynb
###Markdown Numpy Exercises1) Create a uniform subdivision of the interval -1.3 to 2.5 with 64 subdivisions ###Code import numpy as np #import numpy a = np.linspace(-1.3, 2.5, num = 64) print (a) ###Output [-1.3 -1.23968254 -1.17936508 -1.11904762 -1.05873016 -0.9984127 -0.93809524 -0.87777778 -0.81746032 -0.75714286 -0.6968254 -0.63650794 -0.57619048 -0.51587302 -0.45555556 -0.3952381 -0.33492063 -0.27460317 -0.21428571 -0.15396825 -0.09365079 -0.03333333 0.02698413 0.08730159 0.14761905 0.20793651 0.26825397 0.32857143 0.38888889 0.44920635 0.50952381 0.56984127 0.63015873 0.69047619 0.75079365 0.81111111 0.87142857 0.93174603 0.99206349 1.05238095 1.11269841 1.17301587 1.23333333 1.29365079 1.35396825 1.41428571 1.47460317 1.53492063 1.5952381 1.65555556 1.71587302 1.77619048 1.83650794 1.8968254 1.95714286 2.01746032 2.07777778 2.13809524 2.1984127 2.25873016 2.31904762 2.37936508 2.43968254 2.5 ] ###Markdown 2) Generate an array of length 3n filled with the cyclic pattern 1, 2, 3 `np.resize(array_name, num) ` gives you a cyclic pattern of the array repeated 'num' times ###Code n = int(input()) b = np.array([1,2,3]) b_modified = np.resize(b, 3*n) print (b_modified) ###Output 4 [1 2 3 1 2 3 1 2 3 1 2 3] ###Markdown 3) Create an array of the first 10 odd integers. 
`np.arange(start, stop, step)` takes an optional argument which increments start by that step and ends just before stop ###Code c = np.arange(1,20,2) print (c) ###Output [ 1 3 5 7 9 11 13 15 17 19] ###Markdown 4) Find intersection of a and b `np.intersect1d(a,b)` gives the unique and common values between two arrays a and b ###Code #expected output array([2, 4]) a = np.array([1,2,3,2,3,4,3,4,5,6]) b = np.array([7,2,10,2,7,4,9,4,9,8]) res = np.intersect1d(a, b) print (res) ###Output [2 4] ###Markdown 5) Reshape 1d array a to 2d array of 2X5 np.reshape(array_name, (shape)) takes two arguments - an array to which the operation is to be performed and the shape to which the array will be shaped ###Code a = np.arange(10) a_modified = np.reshape(a,(2,5)) print (a_modified) ###Output [[0 1 2 3 4] [5 6 7 8 9]] ###Markdown 6) Create a numpy array to list and vice versa np.asarray(list_argument) converts a list to a numpy array ###Code #Convert list to numpy array list1 = [1, 2, 3, 4, 5, 6, 7, 8, 9] array1 = np.asarray(list1) print (array1) print (type(array1)) ###Output [1 2 3 4 5 6 7 8 9] <class 'numpy.ndarray'> ###Markdown np.ndarray.tolist(array_argument) converts a numpy array to a list ###Code #Convert numpy array to list list2 = np.ndarray.tolist(array1) print (list2) print (type(list2)) ###Output [1, 2, 3, 4, 5, 6, 7, 8, 9] <class 'list'> ###Markdown 7) Create a 10 x 10 arrays of zeros and then "frame" it with a border of ones. 
np.pad(array_name, pad_width = depth_of_the_border, mode='constant', constant_values = fill_value) gives a border or a pad around an array of depth equal to pad_width filled with values equal to constant_values ###Code a = np.zeros(shape=(10,10), dtype='int') a = np.pad(a, pad_width=1, mode='constant', constant_values=1) print (a) ###Output [[1 1 1 1 1 1 1 1 1 1 1 1] [1 0 0 0 0 0 0 0 0 0 0 1] [1 0 0 0 0 0 0 0 0 0 0 1] [1 0 0 0 0 0 0 0 0 0 0 1] [1 0 0 0 0 0 0 0 0 0 0 1] [1 0 0 0 0 0 0 0 0 0 0 1] [1 0 0 0 0 0 0 0 0 0 0 1] [1 0 0 0 0 0 0 0 0 0 0 1] [1 0 0 0 0 0 0 0 0 0 0 1] [1 0 0 0 0 0 0 0 0 0 0 1] [1 0 0 0 0 0 0 0 0 0 0 1] [1 1 1 1 1 1 1 1 1 1 1 1]] ###Markdown 8) Create an 8 x 8 array with a checkerboard pattern of zeros and ones using a slicing+striding approach. a[start:stop] denotes a slice of the array ait can take a third optional argument which denotes the step with which it elements are chosen from the arraya[start:stop:stride] starts taking values from 'start' till 'stop',moving forward by 'stride' steps ###Code a = np.zeros(shape = (8,8)) a[1::2, ::2] = 1 a[::2, 1::2] = 1 print (a) ###Output [[0. 1. 0. 1. 0. 1. 0. 1.] [1. 0. 1. 0. 1. 0. 1. 0.] [0. 1. 0. 1. 0. 1. 0. 1.] [1. 0. 1. 0. 1. 0. 1. 0.] [0. 1. 0. 1. 0. 1. 0. 1.] [1. 0. 1. 0. 1. 0. 1. 0.] [0. 1. 0. 1. 0. 1. 0. 1.] [1. 0. 1. 0. 1. 0. 1. 0.]]
week-06/ch07-oop-shortcuts.ipynb
###Markdown OOP shortcuts* Built-in functions that take care of common tasks in one call (so useful)* File I/O and context managers* An alternative to method overloading* Functions as objects *len, reverse, enumerate*..., *all, any*..., *eval, exec, compile* (I don't use these...)*hasattr, getattr, setattr, and delattr*, which allow attributes on anobject to be manipulated by their string names.*zip*, which takes two or more sequences and returns a new sequence of tuples, where each tuple contains a single value from each sequence.And many more! See the interpreter help documentation for each of the functions listed in dir(\__builtins__). ###Code a_list = [1,2,3,4,5] print('len() builtin: {}'.format(len(a_list))) print('__len__ attr: {}'.format(a_list.__len__())) print() print('look at reversed') print('a_list: {}'.format(a_list)) print('reversed(a_list): {}'.format(reversed(a_list))) print('the reversed list: {}'.format(list(reversed(a_list)))) print() for item in reversed(a_list): print(item) a_string_list = ['one', 'two', 'three', 'four', 'five'] for i,item in enumerate(a_string_list): print('item {}: {}'.format(i, item)) print() for i,item in zip(a_list, a_string_list): print('item {}: {}'.format(i, item)) print('mismatched lengths: takes first n-elements of second item in zip') for i,item in zip(a_list[1:3], a_string_list): print('item {}: {}'.format(i, item)) dir(__builtins__) dir(sum) ###Output _____no_output_____ ###Markdown File I/O and context managers* writing* reading* context managers ###Code contents = "Some file contents\n" file = open("filename.txt", "w") file.write(contents) file.close() file = open("filename2.txt", "w") file.writelines([contents]*3) file.close() # this overwrites every time... also explicit management of open and close # binary add wb, rb dir(file) ###Output _____no_output_____ ###Markdown context managers* with open(...) as file... 
calls __enter__ and __exit__ methods on the file object* cleans up need for 'startup' and 'cleanup' code ###Code with open("filename.txt", "w") as file: file.write(contents) with open("filename_seq_cm.txt", "w") as file: file.writelines([contents]*3) with open("./a-text-file.txt", "r") as file: for i, line in enumerate(file.readlines()): print("line {}: {}".format(i+1, line)) # custom string joiner class StringJoiner(list): def __enter__(self): return self def __exit__(self, type, value, tb): self.result = "".join(self) import random, string with StringJoiner() as joiner: for i in range(15): joiner.append(random.choice(string.ascii_letters)) print(joiner.result) # but... same_thing = ''.join([random.choice(string.ascii_letters) for i in range(15)]) print(same_thing) ###Output VjMLIieQzDfnaDl QkxyOnpvtLqjdFA ###Markdown An alternative to method overloading* Python doesn't permit multiple methods with the same name!!* instead, write the function with expectation of argument type ###Code def func1(arg1): print('original func1') pass def func1(arg2): print('the function has been replaced') pass func1('some arg') Python functions accept: * positional arguments * keyword arguments (with default values) def function(pos1, pos2, kwd1='None', kwd2=5): print('pos1: {}'.format(pos1)) print('pos2: {}'.format(pos2)) print('kwd1: {}'.format(kwd1)) print('kwd2: {}'.format(kwd2)) pass #function(1) #function(1,2) #function(1, 2, kwd1='2') #function(2,['hi'], kwd1={'some':'key-value'}, kwd2=('a', 'tuple')) # specify all inputs out of order function(pos2=2, pos1=['hi'], kwd2={'some':'key-value'}, kwd1=('a', 'tuple')) # variable argument lists def extract_list(*somelist): print(somelist) for item in somelist: print(item) extract_list() #extract_list(1) #extract_list(tuple(range(10))) # feel free to fiddle with this one #extract_list(1,2,3,4) #extract_list('item1', 'item2', 'item3') class Options: default_options = { 'port': 21, 'host': 'localhost', 'username': None, 'password': None, 
'debug': False } def __init__(self, **kwargs): self.options = dict(Options.default_options) # set the default options self.options.update(kwargs) # update based on user input, can also add keys for the options def __getitem__(self, key): return self.options[key] # sample1 = Options(username='dusty', password='drowssap', some_other_option=True) # print(sample1.options) sample2 = Options() print(sample2.options) sample2['port'] ###Output {'port': 21, 'host': 'localhost', 'username': None, 'password': None, 'debug': False} ###Markdown Ordering of arguments:* positional* \*list argument* keyword arguments* \** dictionary to hold anything else ###Code import shutil import os.path def augmented_move(target_folder, *filenames, verbose=False, **specific): '''Move all filenames into the target_folder, allowing specific treatment of certain files.''' # # print args print('target_folder: {}'.format(target_folder)) print('filenames: {}'.format(filenames)) print('verbose: {}'.format(verbose)) print('specific: {}'.format(specific)) def print_verbose(message, filename): '''print the message only if verbose is enabled''' if verbose: print(message.format(filename)) ## augmented_move block # to get the example to work... filenames = list(filenames) # comes in as a tuple filenames.extend(specific.keys()) # use list.extend for filename in filenames: target_path = os.path.join(target_folder, filename) if filename in specific: if specific[filename] == 'ignore': print_verbose("Ignoring {}", filename) elif specific[filename] == 'copy': print_verbose("Copying {}", filename) shutil.copyfile(filename, target_path) else: try: print_verbose("Moving {}...", filename) shutil.move(filename, target_path) except: print('file {} does not exist'.format(filename)) augmented_move("target_folder", "one", verbose=True, two="ignore", three="ignore") ###Output target_folder: target_folder filenames: ('one',) verbose: True specific: {'two': 'ignore', 'three': 'ignore'} Moving one... 
file one does not exist Ignoring two Ignoring three ###Markdown Unpacking arguments* can provide arguments as sequence, list, or dictionary ###Code def show_args(arg1, arg2, arg3="THREE"): print(arg1, arg2, arg3) some_args = range(2) more_args = { "arg1": "ONE", "arg2": "TWO" } show_args(*some_args) show_args(**more_args) # a more practical example for us... from shapely.geometry import box bounds = [0,0,5,10] box(*bounds) # help(box) ###Output _____no_output_____ ###Markdown Treating functions like objects* if you want, you can set descriptions or any other named attribute on a function * why would you want to do this?* you have access to the \__name__ attributeLet's go through the timer example because I found it odd... ###Code def function_object(): '''help doc''' pass function_object.whatever = 'something' function_object.whatever, function_object.__name__ # help(function_object) ###Output _____no_output_____ ###Markdown PyTorch DataLoader example - Sampling an image -- won't work if you don't have the data/libraries... so don't worry about it!! I've kept it for the figures. 
###Code from test_unet_helpers import get_points_list, gtDatasetSampler2 gt_image_2 = '../smart_cities/rasters/union_impervious_raster_2_0_0_wgs84.tif' dg_id = '1030010057062200' shpfile = '../smart_cities/union/union.shp' coords = get_points_list(gt_image_2, dg_id, shpfile, num=100) coords from torch.utils.data import Dataset import rasterio import numpy as np import torch class gtDatasetSampler2(Dataset): """DG Dataset""" def __init__(self, gtfile, coord_pair, window_size=64, transform=None): """ Args: image_dir(string): the folder containing the DG images transform (callable, optional): Optional transform to be applies """ self.image_file = gtfile self.transform = transform self.coords = coord_pair self.window_size = window_size def __getitem__(self, idx): with rasterio.open(self.image_file, 'r') as src: temp = src.read() # get the window r,c = self.coords[idx] r_start = int(r - self.window_size/2) r_end = int(r_start + self.window_size) c_start = int(c - self.window_size/2) c_end = int(c_start + self.window_size) # extract the window img_arr = temp[0,r_start:r_end, c_start:c_end] img_arr = np.expand_dims(img_arr, axis=0) # set no data to 0 img_arr[img_arr == 3] = 0 # convert to tensor img_arr = torch.from_numpy(img_arr).float() return img_arr def __len__(self): return len(self.coords) from torchvision.transforms import ToTensor, Normalize, Compose from torch.utils.data import DataLoader gt_transform = Compose([ ToTensor() ]) gt_dataset_train = gtDatasetSampler2(gt_image_2, coords, transform=gt_transform) # gt_dl_train = DataLoader(gt_dataset_train, batch_size=5, shuffle=False) len(gt_dataset_train), len(coords) from matplotlib import pyplot as plt plt.imshow(gt_dataset_train[0][0]) plt.show() gt_dataset_train[1].shape ###Output _____no_output_____
docs/notebooks/method.ipynb
###Markdown Edge behavior and interiorsThis notebook illustrates the edge behavior (when a grid point falls on the edge of a polygon) and how polygon interiors are treated. PreparationImport regionmask and check the version: ###Code import regionmask regionmask.__version__ ###Output _____no_output_____ ###Markdown Other imports ###Code import xarray as xr import numpy as np import cartopy.crs as ccrs import matplotlib.pyplot as plt from matplotlib import colors as mplc from shapely.geometry import Polygon ###Output _____no_output_____ ###Markdown Define some colors: ###Code color1 = "#9ecae1" color2 = "#fc9272" color3 = "#cab2d6" cmap1 = mplc.ListedColormap([color1]) cmap2 = mplc.ListedColormap([color2]) cmap3 = mplc.ListedColormap([color3]) ###Output _____no_output_____ ###Markdown MethodsRegionmask offers three "methods"* to rasterize regions1. `rasterize`: fastest but only for equally-spaced grid, uses `rasterio.features.rasterize` internally.2. `shapely`: for irregular grids, uses `shapely.vectorized.contains` internally.3. `pygeos`: a faster alternative for irregular grids. This is method is preferred over (2) if the optional dependency pygeos is installed. Uses `pygeos.STRtree` internally.All methods use the `lon` and `lat` coordinates to determine if a grid cell is in a region. `lon` and `lat` are assumed to indicate the *center* of the grid cell. Methods (1) and (2) have the same edge behavior and consider 'holes' in the regions. `regionmask` automatically determines which `method` to use.(2) and (3) subtract a tiny offset from `lon` and `lat` to achieve a edge behaviour consistent with (1). Due to [mapbox/rasterio1844](https://github.com/mapbox/rasterio/issues/1844) this is unfortunately also necessary for (1).\*Note that all "methods" yield the same results. Edge behaviorThe edge behavior determines how points that fall on the outline of a region are treated. It's easiest to see the edge behaviour in an example. 
ExampleDefine a region and a lon/ lat grid, such that some gridpoints lie exactly on the border: ###Code outline = np.array([[-80.0, 50.0], [-80.0, 28.0], [-100.0, 28.0], [-100.0, 50.0]]) region = regionmask.Regions([outline]) ds_US = regionmask.core.utils.create_lon_lat_dataarray_from_bounds( *(-161, -29, 2), *(75, 13, -2) ) print(ds_US) ###Output _____no_output_____ ###Markdown Let's create a mask with each of these methods: ###Code mask_rasterize = region.mask(ds_US, method="rasterize") mask_shapely = region.mask(ds_US, method="shapely") mask_pygeos = region.mask(ds_US, method="pygeos") ###Output _____no_output_____ ###Markdown .. note:: ``regionmask`` automatically detects which method to use, so there is no need to specify the ``method`` keyword. ###Code Plot the masked regions: ###Output _____no_output_____ ###Markdown f, axes = plt.subplots(1, 3, subplot_kw=dict(projection=ccrs.PlateCarree()))opt = dict(add_colorbar=False, ec="0.5", lw=0.5, transform=ccrs.PlateCarree())mask_rasterize.plot(ax=axes[0], cmap=cmap1, **opt)mask_shapely.plot(ax=axes[1], cmap=cmap2, **opt)mask_pygeos.plot(ax=axes[2], cmap=cmap3, **opt)for ax in axes: ax = region.plot_regions(ax=ax, add_label=False) ax.set_extent([-105, -75, 25, 55], ccrs.PlateCarree()) ax.coastlines(lw=0.5) ax.plot( ds_US.LON, ds_US.lat, "*", color="0.5", ms=0.5, transform=ccrs.PlateCarree() )axes[0].set_title("rasterize")axes[1].set_title("shapely")axes[2].set_title("pygeos"); ###Code Points indicate the grid cell centers (`lon` and `lat`), lines the grid cell borders, colored grid cells are selected to be part of the region. The top and right grid cells now belong to the region while the left and bottom grid cells do not. This choice is arbitrary but follows what `rasterio.features.rasterize` does. This avoids spurious columns of unassigned grid points as the following example shows. 
### SREX regions Create a global dataset: ###Output _____no_output_____ ###Markdown ds_GLOB = regionmask.core.utils.create_lon_lat_dataarray_from_bounds( *(-180, 181, 2), *(90, -91, -2)) srex = regionmask.defined_regions.srexsrex_new = srex.mask(ds_GLOB) f, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()))opt = dict(add_colorbar=False, cmap="viridis_r")srex_new.plot(ax=ax, ec="0.7", lw=0.25, **opt)srex.plot_regions(ax=ax, add_label=False, line_kws=dict(lw=1))ax.set_extent([-135, -50, 24, 51], ccrs.PlateCarree())ax.coastlines(resolution="50m", lw=0.25)ax.plot( ds_GLOB.LON, ds_GLOB.lat, "*", color="0.5", ms=0.5, transform=ccrs.PlateCarree())sel = ((ds_GLOB.LON == -105) | (ds_GLOB.LON == -85)) & (ds_GLOB.LAT > 28)ax.plot( ds_GLOB.LON.values[sel], ds_GLOB.LAT.values[sel], "*", color="r", ms=0.5, transform=ccrs.PlateCarree(),)ax.set_title("edge points are assigned to the left polygon", fontsize=9); ###Code Not assigning the grid cells falling exactly on the border of a region (red points) would leave vertical stripes of unassigned cells. ### Points at -180ยฐE (0ยฐE) and -90ยฐN The described edge behaviour leads to a consistent treatment of points on the border. However, gridpoints at -180ยฐE (or 0ยฐE) and -90ยฐN would *never* fall in any region. ###Output _____no_output_____ ###Markdown .. note:: From version 0.8 this applies only if ``wrap_lon`` is *not* set to ``False``. If wrap_lon is set to False `regionmask` assumes the coordinates are not lat and lon coordinates. 
###Code We exemplify this with a region spanning the whole globe and a coarse longitude/ latidude grid: ###Output _____no_output_____ ###Markdown outline_global = np.array( [[-180.0, 90.0], [-180.0, -90.0], [180.0, -90.0], [180.0, 90.0]])region_global = regionmask.Regions([outline_global])lon = np.arange(-180, 180, 30)lat = np.arange(90, -91, -30)LON, LAT = np.meshgrid(lon, lat) ###Code Create the masks: ###Output _____no_output_____ ###Markdown setting `wrap_lon=False` turns this feature offmask_global_nontreat = region_global.mask(lon, lat, wrap_lon=False)mask_global = region_global.mask(lon, lat) ###Code And illustrate the issue: ###Output _____no_output_____ ###Markdown f, axes = plt.subplots(1, 2, subplot_kw=dict(projection=ccrs.PlateCarree()))f.subplots_adjust(wspace=0.05)opt = dict(add_colorbar=False, ec="0.3", lw=0.5, transform=ccrs.PlateCarree())ax = axes[0] work around for SciTools/cartopy/issues/1845mask_global_nontreat = mask_global_nontreat.fillna(1)mask_global_nontreat.plot(ax=ax, colors=[color1, "none"], levels=2, **opt) only for the gridlinesmask_global.plot(ax=ax, colors=["none"], levels=1, **opt)ax.set_title("Not treating points at -180ยฐE and -90ยฐN", size=6)ax.set_title("(a)", loc="left", size=6)ax = axes[1]mask_global.plot(ax=ax, cmap=cmap1, **opt)ax.set_title("Treating points at -180ยฐE and -90ยฐN", size=6)ax.set_title("(b)", loc="left", size=6)for ax in axes: ax = region_global.plot( ax=ax, line_kws=dict(lw=2, color="b15928"), add_label=False, ) ax.plot(LON, LAT, "o", color="0.3", ms=1, transform=ccrs.PlateCarree(), zorder=5) ax.outline_patch.set_visible(False) ###Code In the example the region spans the whole globe and there are gridpoints at -180ยฐE and -90ยฐN. Just applying the approach above leads to gridpoints that are not assigned to any region even though the region is global (as shown in a). Therefore, points at -180ยฐE (or 0ยฐE) and -90ยฐN are treated specially (b): Points at -180ยฐE (0ยฐE) are mapped to 180ยฐE (360ยฐE). 
Points at -90ยฐN are slightly shifted northwards (by 1 * 10 ** -10). Then it is tested if the shifted points belong to any region This means that (i) a point at -180ยฐE is part of the region that is present at 180ยฐE and not the one at -180ยฐE (this is consistent with assigning points to the polygon *left* from it) and (ii) only the points at -90ยฐN get assigned to the region above. This is illustrated in the figure below: ###Output _____no_output_____ ###Markdown outline_global1 = np.array([[-180.0, 60.0], [-180.0, -60.0], [0.0, -60.0], [0.0, 60.0]])outline_global2 = np.array([[0.0, 60.0], [0.0, -60.0], [180.0, -60.0], [180.0, 60.0]])region_global_2 = regionmask.Regions([outline_global1, outline_global2])mask_global_2regions = region_global_2.mask(lon, lat) ax = region_global_2.plot( line_kws=dict(color="b15928", zorder=3, lw=1), add_label=False,)ax.plot(LON, LAT, "o", color="0.3", ms=2, transform=ccrs.PlateCarree(), zorder=5) work around for SciTools/cartopy/issues/1845mask_global_2regions = mask_global_2regions.fillna(2)mask_global_2regions.plot(ax=ax, colors=[color1, color2, "none"], levels=3, **opt) only for the gridlinesmask_global.plot(ax=ax, colors=["none"], levels=1, **opt)ax.set_title("Points at -180ยฐE are mapped to 180ยฐE", size=6)ax.outline_patch.set_lw(0.5)ax.outline_patch.set_zorder(1); ###Code .. note:: This only applies if the border of the region falls *exactly* on the point. One way to avoid the problem is to calculate the `fractional overlap <https://github.com/regionmask/regionmask/issues/38>`_ of each gridpoint with the regions (which is not yet implemented). ###Output _____no_output_____ ###Markdown Polygon interiors`Polygons` can have interior boundaries ('holes'). regionmask unmasks these regions. 
ExampleLet's test this on an example and define a `region_with_hole`: ###Code interior = np.array( [ [-86.0, 44.0], [-86.0, 34.0], [-94.0, 34.0], [-94.0, 44.0], [-86.0, 44.0], ] ) poly = Polygon(outline, holes=[interior]) region_with_hole = regionmask.Regions([poly]) mask_hole_rasterize = region_with_hole.mask(ds_US, method="rasterize") mask_hole_shapely = region_with_hole.mask(ds_US, method="shapely") mask_hole_pygeos = region_with_hole.mask(ds_US, method="pygeos") f, axes = plt.subplots(1, 3, subplot_kw=dict(projection=ccrs.PlateCarree())) opt = dict(add_colorbar=False, ec="0.5", lw=0.5) mask_hole_rasterize.plot(ax=axes[0], cmap=cmap1, **opt) mask_hole_shapely.plot(ax=axes[1], cmap=cmap2, **opt) mask_hole_pygeos.plot(ax=axes[2], cmap=cmap3, **opt) for ax in axes: region_with_hole.plot_regions(ax=ax, add_label=False, line_kws=dict(lw=1)) ax.set_extent([-105, -75, 25, 55], ccrs.PlateCarree()) ax.coastlines(lw=0.25) ax.plot( ds_US.LON, ds_US.lat, "o", color="0.5", ms=0.5, transform=ccrs.PlateCarree() ) axes[0].set_title("rasterize") axes[1].set_title("shapely") axes[2].set_title("pygeos"); ###Output _____no_output_____ ###Markdown Note how the edge behavior of the interior is inverse to the edge behavior of the outerior. Caspian SeaThe Caspian Sea is defined as polygon interior. 
###Code land110 = regionmask.defined_regions.natural_earth.land_110 mask_land110 = land110.mask(ds_GLOB) f, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree())) mask_land110.plot(ax=ax, cmap=cmap2, add_colorbar=False) ax.set_extent([15, 75, 25, 50], ccrs.PlateCarree()) ax.coastlines(resolution="50m", lw=0.5) ax.plot( ds_GLOB.LON, ds_GLOB.lat, ".", color="0.5", ms=0.5, transform=ccrs.PlateCarree() ) ax.text(52, 43.5, "Caspian Sea", transform=ccrs.PlateCarree()) ax.set_title("Polygon interiors are unmasked"); ###Output _____no_output_____ ###Markdown Edge behavior and interiorsThis notebook illustrates the edge behavior (when a grid point falls on the edge of a polygon) and how polygon interiors are treated. PreparationImport regionmask and check the version: ###Code import regionmask regionmask.__version__ ###Output _____no_output_____ ###Markdown Other imports ###Code import xarray as xr import numpy as np import cartopy.crs as ccrs import matplotlib.pyplot as plt from matplotlib import colors as mplc from shapely.geometry import Polygon ###Output _____no_output_____ ###Markdown Define some colors: ###Code color1 = "#9ecae1" color2 = "#fc9272" color3 = "#cab2d6" cmap1 = mplc.ListedColormap([color1]) cmap2 = mplc.ListedColormap([color2]) cmap3 = mplc.ListedColormap([color3]) cmap12 = mplc.ListedColormap([color1, color2]) ###Output _____no_output_____ ###Markdown MethodsRegionmask offers three "methods"* to rasterize regions1. `rasterize`: fastest but only for equally-spaced grid, uses `rasterio.features.rasterize` internally.2. `shapely`: for irregular grids, uses `shapely.vectorized.contains` internally.3. `pygeos`: a faster alternative for irregular grids. This is method is preferred over (2) if the optional dependency pygeos is installed. Uses `pygeos.STRtree` internally.All methods use the `lon` and `lat` coordinates to determine if a grid cell is in a region. `lon` and `lat` are assumed to indicate the *center* of the grid cell. 
All methods have the same edge behavior and consider 'holes' in the regions. `regionmask` automatically determines which `method` to use.(2) and (3) subtract a tiny offset from `lon` and `lat` to achieve a edge behaviour consistent with (1). Due to [mapbox/rasterio1844](https://github.com/mapbox/rasterio/issues/1844) this is unfortunately also necessary for (1).\*Note that all "methods" yield the same results. Edge behaviorThe edge behavior determines how points that fall on the outline of a region are treated. It's easiest to see the edge behaviour in an example. ExampleDefine a region and a lon/ lat grid, such that some gridpoints lie exactly on the border: ###Code outline = np.array([[-80.0, 50.0], [-80.0, 28.0], [-100.0, 28.0], [-100.0, 50.0]]) region = regionmask.Regions([outline]) ds_US = regionmask.core.utils.create_lon_lat_dataarray_from_bounds( *(-161, -29, 2), *(75, 13, -2) ) print(ds_US) ###Output _____no_output_____ ###Markdown Let's create a mask with each of these methods: ###Code mask_rasterize = region.mask(ds_US, method="rasterize") mask_shapely = region.mask(ds_US, method="shapely") mask_pygeos = region.mask(ds_US, method="pygeos") ###Output _____no_output_____ ###Markdown .. note:: ``regionmask`` automatically detects which method to use, so there is no need to specify the ``method`` keyword. 
###Code Plot the masked regions: ###Output _____no_output_____ ###Markdown f, axes = plt.subplots(1, 3, subplot_kw=dict(projection=ccrs.PlateCarree()))opt = dict(add_colorbar=False, ec="0.5", lw=0.5, transform=ccrs.PlateCarree())mask_rasterize.plot(ax=axes[0], cmap=cmap1, **opt)mask_shapely.plot(ax=axes[1], cmap=cmap2, **opt)mask_pygeos.plot(ax=axes[2], cmap=cmap3, **opt)for ax in axes: ax = region.plot_regions(ax=ax, add_label=False) ax.set_extent([-105, -75, 25, 55], ccrs.PlateCarree()) ax.coastlines(lw=0.5) ax.plot( ds_US.LON, ds_US.lat, "*", color="0.5", ms=0.5, transform=ccrs.PlateCarree() )axes[0].set_title("rasterize")axes[1].set_title("shapely")axes[2].set_title("pygeos"); ###Code Points indicate the grid cell centers (`lon` and `lat`), lines the grid cell borders, colored grid cells are selected to be part of the region. The top and right grid cells now belong to the region while the left and bottom grid cells do not. This choice is arbitrary but follows what `rasterio.features.rasterize` does. This avoids spurious columns of unassigned grid points as the following example shows. 
### SREX regions Create a global dataset: ###Output _____no_output_____ ###Markdown ds_GLOB = regionmask.core.utils.create_lon_lat_dataarray_from_bounds( *(-180, 181, 2), *(90, -91, -2)) srex = regionmask.defined_regions.srexsrex_new = srex.mask(ds_GLOB) f, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()))opt = dict(add_colorbar=False, cmap="viridis_r")srex_new.plot(ax=ax, ec="0.7", lw=0.25, **opt)srex.plot_regions(ax=ax, add_label=False, line_kws=dict(lw=1))ax.set_extent([-135, -50, 24, 51], ccrs.PlateCarree())ax.coastlines(resolution="50m", lw=0.25)ax.plot( ds_GLOB.LON, ds_GLOB.lat, "*", color="0.5", ms=0.5, transform=ccrs.PlateCarree())sel = ((ds_GLOB.LON == -105) | (ds_GLOB.LON == -85)) & (ds_GLOB.LAT > 28)ax.plot( ds_GLOB.LON.values[sel], ds_GLOB.LAT.values[sel], "*", color="r", ms=0.5, transform=ccrs.PlateCarree(),)ax.set_title("edge points are assigned to the left polygon", fontsize=9); ###Code Not assigning the grid cells falling exactly on the border of a region (red points) would leave vertical stripes of unassigned cells. ### Points at -180ยฐE (0ยฐE) and -90ยฐN The described edge behaviour leads to a consistent treatment of points on the border. However, gridpoints at -180ยฐE (or 0ยฐE) and -90ยฐN would *never* fall in any region. ###Output _____no_output_____ ###Markdown .. note:: From version 0.8 this applies only if ``wrap_lon`` is *not* set to ``False``. If wrap_lon is set to False `regionmask` assumes the coordinates are not lat and lon coordinates. 
###Code We exemplify this with a region spanning the whole globe and a coarse longitude/ latidude grid: ###Output _____no_output_____ ###Markdown almost 360 to avoid wrap-around for the plotlon_max = 360.0 - 1e-10outline_global = np.array([[0, 90], [0, -90], [lon_max, -90], [lon_max, 90]])region_global = regionmask.Regions([outline_global])lon = np.arange(0, 360, 30)lat = np.arange(90, -91, -30)LON, LAT = np.meshgrid(lon, lat) ###Code Create the masks: ###Output _____no_output_____ ###Markdown setting `wrap_lon=False` turns this feature offmask_global_nontreat = region_global.mask(LON, LAT, wrap_lon=False)mask_global = region_global.mask(LON, LAT) ###Code And illustrate the issue: ###Output _____no_output_____ ###Markdown proj = ccrs.PlateCarree(central_longitude=180)f, axes = plt.subplots(1, 2, subplot_kw=dict(projection=proj))f.subplots_adjust(wspace=0.05)opt = dict(add_colorbar=False, ec="0.2", lw=0.25, transform=ccrs.PlateCarree())ax = axes[0]mask_global_nontreat.plot(ax=ax, cmap=cmap1, x="lon", y="lat", **opt)ax.set_title("Not treating points at 0ยฐE and -90ยฐN", size=6)ax.set_title("(a)", loc="left", size=6)ax = axes[1]mask_global.plot(ax=ax, cmap=cmap1, x="lon", y="lat", **opt)ax.set_title("Treating points at 0ยฐE and -90ยฐN", size=6)ax.set_title("(b)", loc="left", size=6)for ax in axes: ax = region_global.plot( ax=ax, line_kws=dict(lw=2, color="b15928"), add_label=False, ) ax.plot(LON, LAT, "o", color="0.3", ms=1, transform=ccrs.PlateCarree(), zorder=5) ax.outline_patch.set_visible(False) ###Code In the example the region spans the whole globe and there are gridpoints at 0ยฐE and -90ยฐN. Just applying the approach above leads to gridpoints that are not assigned to any region even though the region is global (as shown in a). Therefore, points at -180ยฐE (or 0ยฐE) and -90ยฐN are treated specially (b): Points at -180ยฐE (0ยฐE) are mapped to 180ยฐE (360ยฐE). Points at -90ยฐN are slightly shifted northwards (by 1 * 10 ** -10). 
Then it is tested if the shifted points belong to any region This means that (i) a point at -180ยฐE is part of the region that is present at 180ยฐE and not the one at -180ยฐE (this is consistent with assigning points to the polygon *left* from it) and (ii) only the points at -90ยฐN get assigned to the region above. This is illustrated in the figure below: ###Output _____no_output_____ ###Markdown outline_global1 = np.array([[-180.0, 60.0], [-180.0, -60.0], [0.0, -60.0], [0.0, 60.0]])outline_global2 = np.array([[0.0, 60.0], [0.0, -60.0], [180.0, -60.0], [180.0, 60.0]])region_global_2 = regionmask.Regions([outline_global1, outline_global2])mask_global_2regions = region_global_2.mask(lon, lat) ax = region_global_2.plot( line_kws=dict(color="b15928", zorder=3, lw=1.5), add_label=False,)ax.plot( LON, LAT, "o", color="0.3", lw=0.25, ms=2, transform=ccrs.PlateCarree(), zorder=5)mask_global_2regions.plot(ax=ax, cmap=cmap12, **opt)ax.set_title("Points at -180ยฐE are mapped to 180ยฐE", size=6)ax.outline_patch.set_lw(0.25)ax.outline_patch.set_zorder(1); ###Code .. note:: This only applies if the border of the region falls *exactly* on the point. One way to avoid the problem is to calculate the fractional overlap (see :issue:`38`) of each gridpoint with the regions (which is not yet implemented). ###Output _____no_output_____ ###Markdown Polygon interiors`Polygons` can have interior boundaries ('holes'). regionmask unmasks these regions. 
ExampleLet's test this on an example and define a `region_with_hole`: ###Code interior = np.array( [ [-86.0, 44.0], [-86.0, 34.0], [-94.0, 34.0], [-94.0, 44.0], [-86.0, 44.0], ] ) poly = Polygon(outline, holes=[interior]) region_with_hole = regionmask.Regions([poly]) mask_hole_rasterize = region_with_hole.mask(ds_US, method="rasterize") mask_hole_shapely = region_with_hole.mask(ds_US, method="shapely") mask_hole_pygeos = region_with_hole.mask(ds_US, method="pygeos") f, axes = plt.subplots(1, 3, subplot_kw=dict(projection=ccrs.PlateCarree())) opt = dict(add_colorbar=False, ec="0.5", lw=0.5) mask_hole_rasterize.plot(ax=axes[0], cmap=cmap1, **opt) mask_hole_shapely.plot(ax=axes[1], cmap=cmap2, **opt) mask_hole_pygeos.plot(ax=axes[2], cmap=cmap3, **opt) for ax in axes: region_with_hole.plot_regions(ax=ax, add_label=False, line_kws=dict(lw=1)) ax.set_extent([-105, -75, 25, 55], ccrs.PlateCarree()) ax.coastlines(lw=0.25) ax.plot( ds_US.LON, ds_US.lat, "o", color="0.5", ms=0.5, transform=ccrs.PlateCarree() ) axes[0].set_title("rasterize") axes[1].set_title("shapely") axes[2].set_title("pygeos"); ###Output _____no_output_____ ###Markdown Note how the edge behavior of the interior is inverse to the edge behavior of the outerior. Caspian SeaThe Caspian Sea is defined as polygon interior. ###Code land110 = regionmask.defined_regions.natural_earth_v5_0_0.land_110 mask_land110 = land110.mask(ds_GLOB) f, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree())) mask_land110.plot(ax=ax, cmap=cmap2, add_colorbar=False) ax.set_extent([15, 75, 25, 50], ccrs.PlateCarree()) ax.coastlines(resolution="50m", lw=0.5) ax.plot( ds_GLOB.LON, ds_GLOB.lat, ".", color="0.5", ms=0.5, transform=ccrs.PlateCarree() ) ax.text(52, 43.5, "Caspian Sea", transform=ccrs.PlateCarree()) ax.set_title("Polygon interiors are unmasked"); ###Output _____no_output_____ ###Markdown Edge behavior and interiorsThis notebook illustrates the edge behavior and how Polygon interiors are treated. 
###Code .. note:: From version 0.5 ``regionmask`` treats points on the region borders differently and also considers poygon interiors (holes), e.g. the Caspian Sea in ``natural_earth.land_110`` region. ###Output _____no_output_____ ###Markdown PreparationImport regionmask and check the version: ###Code import regionmask regionmask.__version__ ###Output _____no_output_____ ###Markdown Other imports ###Code import xarray as xr import numpy as np import cartopy.crs as ccrs import matplotlib.pyplot as plt from matplotlib import colors as mplc from shapely.geometry import Polygon ###Output _____no_output_____ ###Markdown Define some colors: ###Code cmap1 = mplc.ListedColormap(["#9ecae1"]) cmap2 = mplc.ListedColormap(["#fc9272"]) cmap3 = mplc.ListedColormap(["#cab2d6"]) cmap_2col = mplc.ListedColormap(["#9ecae1", "#fc9272"]) ###Output _____no_output_____ ###Markdown MethodsRegionmask offers two methods to rasterize regions1. `rasterize`: fastest but only for equally-spaced grid, uses `rasterio.features.rasterize` internally.2. `shapely`: for irregular grids, uses `shapely.vectorized.contains` internally.All methods use the `lon` and `lat` coordinates to determine if a grid cell is in a region. `lon` and `lat` are assumed to indicate the *center* of the grid cell. Methods (1) and (2) have the same edge behavior and consider 'holes' in the regions. `regionmask` automatically determines which `method` to use.(2) subtracts a tiny offset from `lon` and `lat` to achieve a edge behaviour consistent with (1). Due to [mapbox/rasterio/1844](https://github.com/mapbox/rasterio/issues/1844) this is unfortunately also necessary for (1). Edge behaviorAs of version 0.5 `regionmask` has a new edge behavior - points that fall of the outline of a region are now consistently treated. This was not the case in earlier versions (xref [matplotlib/matplotlib9704](https://github.com/matplotlib/matplotlib/issues/9704)). 
It's easiest to see the edge behaviour in an ExampleDefine a region and a lon/ lat grid, such that some gridpoints lie exactly on the border: ###Code outline = np.array([[-80.0, 50.0], [-80.0, 28.0], [-100.0, 28.0], [-100.0, 50.0]]) region = regionmask.Regions([outline]) ds_US = regionmask.core.utils.create_lon_lat_dataarray_from_bounds( *(-161, -29, 2), *(75, 13, -2) ) print(ds_US) ###Output _____no_output_____ ###Markdown Let's create a mask with each of these methods: ###Code mask_rasterize = region.mask(ds_US, method="rasterize") mask_shapely = region.mask(ds_US, method="shapely") ###Output _____no_output_____ ###Markdown .. note:: ``regionmask`` automatically detects which method to use, so there is no need to specify the ``method`` keyword. ###Code Plot the masked regions: ###Output _____no_output_____ ###Markdown f, axes = plt.subplots(1, 2, subplot_kw=dict(projection=ccrs.PlateCarree()))opt = dict(add_colorbar=False, ec="0.5", lw=0.5, transform=ccrs.PlateCarree())mask_rasterize.plot(ax=axes[0], cmap=cmap1, **opt)mask_shapely.plot(ax=axes[1], cmap=cmap2, **opt)for ax in axes: ax = region.plot_regions(ax=ax, add_label=False) ax.set_extent([-105, -75, 25, 55], ccrs.PlateCarree()) ax.coastlines(lw=0.5) ax.plot( ds_US.LON, ds_US.lat, "*", color="0.5", ms=0.5, transform=ccrs.PlateCarree() )axes[0].set_title("rasterize")axes[1].set_title("shapely") ###Code Points indicate the grid cell centers (`lon` and `lat`), lines the grid cell borders, colored grid cells are selected to be part of the region. The top and right grid cells now belong to the region while the left and bottom grid cells do not. This choice is arbitrary but mimicks what `rasterio.features.rasterize` does. This avoids spurious columns of unassigned grid points as the following example shows. 
### SREX regions Create a global dataset: ###Output _____no_output_____ ###Markdown ds_GLOB = regionmask.core.utils.create_lon_lat_dataarray_from_bounds( *(-180, 181, 2), *(90, -91, -2)) srex = regionmask.defined_regions.srexsrex_new = srex.mask(ds_GLOB) f, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()))opt = dict(add_colorbar=False, cmap="viridis_r")srex_new.plot(ax=ax, ec="0.7", lw=0.25, **opt)srex.plot_regions(ax=ax, add_label=False, line_kws=dict(lw=1))ax.set_extent([-135, -50, 24, 51], ccrs.PlateCarree())ax.coastlines(resolution="50m", lw=0.25)ax.plot( ds_GLOB.LON, ds_GLOB.lat, "*", color="0.5", ms=0.5, transform=ccrs.PlateCarree())sel = ((ds_GLOB.LON == -105) | (ds_GLOB.LON == -85)) & (ds_GLOB.LAT > 28)ax.plot( ds_GLOB.LON.values[sel], ds_GLOB.LAT.values[sel], "*", color="r", ms=0.5, transform=ccrs.PlateCarree(),)ax.set_title("new (rasterize + shapely)"); ###Code Not assigning the grid cells falling exactly on the border of a region (red points) would leave vertical stripes of unassigned cells. ### Points at -180ยฐE (0ยฐE) and -90ยฐN The described edge behaviour leads to a consistent treatment of points on the border. However, gridpoints at -180ยฐE (or 0ยฐE) and -90ยฐN would *never* fall in any region. 
We exemplify this with a region spanning the whole globe and a coarse longitude/ latidude grid: ###Output _____no_output_____ ###Markdown outline_global = np.array( [[-180.0, 90.0], [-180.0, -90.0], [180.0, -90.0], [180.0, 90.0]])region_global = regionmask.Regions([outline_global])lon = np.arange(-180, 180, 30)lat = np.arange(90, -91, -30)LON, LAT = np.meshgrid(lon, lat) ###Code Create the masks: ###Output _____no_output_____ ###Markdown mask_global = region_global.mask(lon, lat) we need to manually create the maskmask_global_nontreat = mask_global.copy()mask_global_nontreat[-1, :] = np.NaNmask_global_nontreat[:, 0] = np.NaN ###Code And illustrate the issue: ###Output _____no_output_____ ###Markdown f, axes = plt.subplots(1, 2, subplot_kw=dict(projection=ccrs.PlateCarree()))f.subplots_adjust(wspace=0.05)opt = dict(add_colorbar=False, ec="0.3", lw=0.5, transform=ccrs.PlateCarree())ax = axes[0]mask_global_nontreat.plot(ax=ax, cmap=cmap1, **opt) only for the gridlinesmask_global.plot(ax=ax, colors=["none"], levels=1, **opt)ax.set_title("Not treating points at -180ยฐE and -90ยฐN", size=6)ax.set_title("(a)", loc="left", size=6)ax = axes[1]mask_global.plot(ax=ax, cmap=cmap1, **opt)ax.set_title("Treating points at -180ยฐE and -90ยฐN", size=6)ax.set_title("(b)", loc="left", size=6)for ax in axes: ax = region_global.plot( ax=ax, line_kws=dict(lw=2, color="b15928"), add_label=False, ) ax.plot(LON, LAT, "o", color="0.3", ms=1, transform=ccrs.PlateCarree(), zorder=5) ax.outline_patch.set_visible(False) ###Code In the example the region spans the whole globe and there are gridpoints at -180ยฐE and -90ยฐN. Just applying the approach above leads to gridpoints that are not assigned to any region even though the region is global (as shown in a). Therefore, points at -180ยฐE (or 0ยฐE) and -90ยฐN are treated specially (b): Points at -180ยฐE (0ยฐE) are mapped to 180ยฐE (360ยฐE). Points at -90ยฐN are slightly shifted northwards (by 1 * 10 ** -10). 
Then it is tested if the shifted points belong to any region This means that (i) a point at -180ยฐE is part of the region that is present at 180ยฐE (and not the one at -180ยฐE) and (ii) only the points at -90ยฐN get assigned to the region above. ###Output _____no_output_____ ###Markdown outline_global1 = np.array([[-180.0, 60.0], [-180.0, -60.0], [0.0, -60.0], [0.0, 60.0]])outline_global2 = np.array([[0.0, 60.0], [0.0, -60.0], [180.0, -60.0], [180.0, 60.0]])region_global_2 = regionmask.Regions([outline_global1, outline_global2])mask_global_2regions = region_global_2.mask(lon, lat) ax = region_global_2.plot(line_kws=dict(color="b15928", zorder=3), add_label=False,)ax.plot(LON, LAT, "o", color="0.3", ms=2, transform=ccrs.PlateCarree(), zorder=5)mask_global_2regions.plot(ax=ax, cmap=cmap_2col, **opt) only for the gridlinesmask_global.plot(ax=ax, colors=["none"], levels=1, **opt)ax.set_title("Points at -180ยฐE are mapped to 180ยฐE", size=6)ax.outline_patch.set_lw(0.5)ax.outline_patch.set_zorder(1); ###Code .. note:: This only applies if the border of the region falls exactly on the point. One way to avoid the problem is to calculate the `fractional overlap <https://github.com/regionmask/regionmask/issues/38>`_ of each gridpoint with the regions (which is not yet implemented). ###Output _____no_output_____ ###Markdown Polygon interiors`Polygons` can have interior boundaries ('holes'). Prior to version 0.5.0 these were not considered and e.g. the Caspian Sea was not 'unmasked'. 
ExampleLet's test this on an example and define a `region_with_hole`: ###Code interior = np.array( [[-86.0, 44.0], [-86.0, 34.0], [-94.0, 34.0], [-94.0, 44.0], [-86.0, 44.0],] ) poly = Polygon(outline, [interior]) region_with_hole = regionmask.Regions([poly]) mask_hole_rasterize = region_with_hole.mask(ds_US, method="rasterize") mask_hole_shapely = region_with_hole.mask(ds_US, method="shapely") f, axes = plt.subplots(1, 2, subplot_kw=dict(projection=ccrs.PlateCarree())) opt = dict(add_colorbar=False, ec="0.5", lw=0.5) mask_hole_rasterize.plot(ax=axes[0], cmap=cmap1, **opt) mask_hole_shapely.plot(ax=axes[1], cmap=cmap2, **opt) for ax in axes: region.plot_regions(ax=ax, add_label=False) ax.set_extent([-105, -75, 25, 55], ccrs.PlateCarree()) ax.coastlines(lw=0.5) ax.plot( ds_US.LON, ds_US.lat, "o", color="0.5", ms=0.5, transform=ccrs.PlateCarree() ) axes[0].set_title("rasterize") axes[1].set_title("shapely"); ###Output _____no_output_____ ###Markdown Caspian Sea ###Code land110 = regionmask.defined_regions.natural_earth.land_110 land_new = land110.mask(ds_GLOB) f, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree())) opt = dict(add_colorbar=False) land_new.plot(ax=ax, cmap=cmap2, **opt) ax.set_extent([15, 75, 25, 50], ccrs.PlateCarree()) ax.coastlines(resolution="50m", lw=0.5) ax.plot( ds_GLOB.LON, ds_GLOB.lat, ".", color="0.5", ms=0.5, transform=ccrs.PlateCarree() ) ax.text(52, 43.5, "Caspian Sea", transform=ccrs.PlateCarree()) ax.set_title("Polygon interiors are unmasked"); ###Output _____no_output_____ ###Markdown Edge behavior and interiorsThis notebook illustrates the edge behavior and how Polygon interiors are treated. ###Code .. note:: From version 0.5 ``regionmask`` treats points on the region borders differently and also considers poygon interiors (holes), e.g. the Caspian Sea in ``natural_earth.land_110`` region. 
###Output _____no_output_____ ###Markdown PreparationImport regionmask and check the version: ###Code import regionmask regionmask.__version__ ###Output _____no_output_____ ###Markdown Other imports ###Code import xarray as xr import numpy as np import cartopy.crs as ccrs import matplotlib.pyplot as plt from matplotlib import colors as mplc from shapely.geometry import Polygon ###Output _____no_output_____ ###Markdown Define some colors: ###Code cmap1 = mplc.ListedColormap(["#9ecae1"]) cmap2 = mplc.ListedColormap(["#fc9272"]) cmap3 = mplc.ListedColormap(["#cab2d6"]) cmap_2col = mplc.ListedColormap(["#9ecae1", "#fc9272"]) ###Output _____no_output_____ ###Markdown MethodsRegionmask offers two methods to rasterize regions1. `rasterize`: fastest but only for equally-spaced grid, uses `rasterio.features.rasterize` internally.2. `shapely`: for irregular grids, uses `shapely.vectorized.contains` internally.All methods use the `lon` and `lat` coordinates to determine if a grid cell is in a region. `lon` and `lat` are assumed to indicate the *center* of the grid cell. Methods (1) and (2) have the same edge behavior and consider 'holes' in the regions. `regionmask` automatically determines which `method` to use.(2) subtracts a tiny offset from `lon` and `lat` to achieve a edge behaviour consistent with (1). Due to [mapbox/rasterio/1844](https://github.com/mapbox/rasterio/issues/1844) this is unfortunately also necessary for (1). Edge behaviorAs of version 0.5 `regionmask` has a new edge behavior - points that fall of the outline of a region are now consistently treated. This was not the case in earlier versions (xref [matplotlib/matplotlib9704](https://github.com/matplotlib/matplotlib/issues/9704)). 
It's easiest to see the edge behaviour in an ExampleDefine a region and a lon/ lat grid, such that some gridpoints lie exactly on the border: ###Code outline = np.array([[-80.0, 50.0], [-80.0, 28.0], [-100.0, 28.0], [-100.0, 50.0]]) region = regionmask.Regions([outline]) ds_US = regionmask.core.utils.create_lon_lat_dataarray_from_bounds( *(-161, -29, 2), *(75, 13, -2) ) print(ds_US) ###Output _____no_output_____ ###Markdown Let's create a mask with each of these methods: ###Code mask_rasterize = region.mask(ds_US, method="rasterize") mask_shapely = region.mask(ds_US, method="shapely") ###Output _____no_output_____ ###Markdown .. note:: ``regionmask`` automatically detects which method to use, so there is no need to specify the ``method`` keyword. ###Code Plot the masked regions: ###Output _____no_output_____ ###Markdown f, axes = plt.subplots(1, 2, subplot_kw=dict(projection=ccrs.PlateCarree()))opt = dict(add_colorbar=False, ec="0.5", lw=0.5, transform=ccrs.PlateCarree())mask_rasterize.plot(ax=axes[0], cmap=cmap1, **opt)mask_shapely.plot(ax=axes[1], cmap=cmap2, **opt)for ax in axes: ax = region.plot_regions(ax=ax, add_label=False) ax.set_extent([-105, -75, 25, 55], ccrs.PlateCarree()) ax.coastlines(lw=0.5) ax.plot( ds_US.LON, ds_US.lat, "*", color="0.5", ms=0.5, transform=ccrs.PlateCarree() )axes[0].set_title("rasterize")axes[1].set_title("shapely") ###Code Points indicate the grid cell centers (`lon` and `lat`), lines the grid cell borders, colored grid cells are selected to be part of the region. The top and right grid cells now belong to the region while the left and bottom grid cells do not. This choice is arbitrary but mimicks what `rasterio.features.rasterize` does. This avoids spurious columns of unassigned grid points as the following example shows. 
### SREX regions Create a global dataset: ###Output _____no_output_____ ###Markdown ds_GLOB = regionmask.core.utils.create_lon_lat_dataarray_from_bounds( *(-180, 181, 2), *(90, -91, -2)) srex = regionmask.defined_regions.srexsrex_new = srex.mask(ds_GLOB) f, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()))opt = dict(add_colorbar=False, cmap="viridis_r")srex_new.plot(ax=ax, ec="0.7", lw=0.25, **opt)srex.plot_regions(ax=ax, add_label=False, line_kws=dict(lw=1))ax.set_extent([-135, -50, 24, 51], ccrs.PlateCarree())ax.coastlines(resolution="50m", lw=0.25)ax.plot( ds_GLOB.LON, ds_GLOB.lat, "*", color="0.5", ms=0.5, transform=ccrs.PlateCarree())sel = ((ds_GLOB.LON == -105) | (ds_GLOB.LON == -85)) & (ds_GLOB.LAT > 28)ax.plot( ds_GLOB.LON.values[sel], ds_GLOB.LAT.values[sel], "*", color="r", ms=0.5, transform=ccrs.PlateCarree(),)ax.set_title("new (rasterize + shapely)"); ###Code Not assigning the grid cells falling exactly on the border of a region (red points) would leave vertical stripes of unassigned cells. ### Points at -180ยฐE (0ยฐE) and -90ยฐN The described edge behaviour leads to a consistent treatment of points on the border. However, gridpoints at -180ยฐE (or 0ยฐE) and -90ยฐN would *never* fall in any region. 
We exemplify this with a region spanning the whole globe and a coarse longitude/ latidude grid: ###Output _____no_output_____ ###Markdown outline_global = np.array( [[-180.0, 90.0], [-180.0, -90.0], [180.0, -90.0], [180.0, 90.0]])region_global = regionmask.Regions([outline_global])lon = np.arange(-180, 180, 30)lat = np.arange(90, -91, -30)LON, LAT = np.meshgrid(lon, lat) ###Code Create the masks: ###Output _____no_output_____ ###Markdown mask_global = region_global.mask(lon, lat) we need to manually create the maskmask_global_nontreat = mask_global.copy()mask_global_nontreat[-1, :] = np.NaNmask_global_nontreat[:, 0] = np.NaN ###Code And illustrate the issue: ###Output _____no_output_____ ###Markdown f, axes = plt.subplots(1, 2, subplot_kw=dict(projection=ccrs.PlateCarree()))f.subplots_adjust(wspace=0.05)opt = dict(add_colorbar=False, ec="0.3", lw=0.5, transform=ccrs.PlateCarree())ax = axes[0]mask_global_nontreat.plot(ax=ax, cmap=cmap1, **opt) only for the gridlinesmask_global.plot(ax=ax, colors=["none"], levels=1, **opt)ax.set_title("Not treating points at -180ยฐE and -90ยฐN", size=6)ax.set_title("(a)", loc="left", size=6)ax = axes[1]mask_global.plot(ax=ax, cmap=cmap1, **opt)ax.set_title("Treating points at -180ยฐE and -90ยฐN", size=6)ax.set_title("(b)", loc="left", size=6)for ax in axes: ax = region_global.plot( ax=ax, line_kws=dict(lw=2, color="b15928"), add_label=False, ) ax.plot(LON, LAT, "o", color="0.3", ms=1, transform=ccrs.PlateCarree(), zorder=5) ax.outline_patch.set_visible(False) ###Code In the example the region spans the whole globe and there are gridpoints at -180ยฐE and -90ยฐN. Just applying the approach above leads to gridpoints that are not assigned to any region even though the region is global (as shown in a). Therefore, points at -180ยฐE (or 0ยฐE) and -90ยฐN are treated specially (b): Points at -180ยฐE (0ยฐE) are mapped to 180ยฐE (360ยฐE). Points at -90ยฐN are slightly shifted northwards (by 1 * 10 ** -10). 
Then it is tested if the shifted points belong to any region This means that (i) a point at -180ยฐE is part of the region that is present at 180ยฐE (and not the one at -180ยฐE) and (ii) only the points at -90ยฐN get assigned to the region above. ###Output _____no_output_____ ###Markdown outline_global1 = np.array([[-180.0, 60.0], [-180.0, -60.0], [0.0, -60.0], [0.0, 60.0]])outline_global2 = np.array([[0.0, 60.0], [0.0, -60.0], [180.0, -60.0], [180.0, 60.0]])region_global_2 = regionmask.Regions([outline_global1, outline_global2])mask_global_2regions = region_global_2.mask(lon, lat) ax = region_global_2.plot(line_kws=dict(color="b15928", zorder=3), add_label=False,)ax.plot(LON, LAT, "o", color="0.3", ms=2, transform=ccrs.PlateCarree(), zorder=5)mask_global_2regions.plot(ax=ax, cmap=cmap_2col, **opt) only for the gridlinesmask_global.plot(ax=ax, colors=["none"], levels=1, **opt)ax.set_title("Points at -180ยฐE are mapped to 180ยฐE", size=6)ax.outline_patch.set_lw(0.5)ax.outline_patch.set_zorder(1); ###Code .. note:: This only applies if the border of the region falls exactly on the point. One way to avoid the problem is to calculate the `fractional overlap <https://github.com/mathause/regionmask/issues/38>`_ of each gridpoint with the regions (which is not yet implemented). ###Output _____no_output_____ ###Markdown Polygon interiors`Polygons` can have interior boundaries ('holes'). Prior to version 0.5.0 these were not considered and e.g. the Caspian Sea was not 'unmasked'. 
ExampleLet's test this on an example and define a `region_with_hole`: ###Code interior = np.array( [[-86.0, 44.0], [-86.0, 34.0], [-94.0, 34.0], [-94.0, 44.0], [-86.0, 44.0],] ) poly = Polygon(outline, [interior]) region_with_hole = regionmask.Regions([poly]) mask_hole_rasterize = region_with_hole.mask(ds_US, method="rasterize") mask_hole_shapely = region_with_hole.mask(ds_US, method="shapely") f, axes = plt.subplots(1, 2, subplot_kw=dict(projection=ccrs.PlateCarree())) opt = dict(add_colorbar=False, ec="0.5", lw=0.5) mask_hole_rasterize.plot(ax=axes[0], cmap=cmap1, **opt) mask_hole_shapely.plot(ax=axes[1], cmap=cmap2, **opt) for ax in axes: region.plot_regions(ax=ax, add_label=False) ax.set_extent([-105, -75, 25, 55], ccrs.PlateCarree()) ax.coastlines(lw=0.5) ax.plot( ds_US.LON, ds_US.lat, "o", color="0.5", ms=0.5, transform=ccrs.PlateCarree() ) axes[0].set_title("rasterize") axes[1].set_title("shapely"); ###Output _____no_output_____ ###Markdown Caspian Sea ###Code land110 = regionmask.defined_regions.natural_earth.land_110 land_new = land110.mask(ds_GLOB) f, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree())) opt = dict(add_colorbar=False) land_new.plot(ax=ax, cmap=cmap2, **opt) ax.set_extent([15, 75, 25, 50], ccrs.PlateCarree()) ax.coastlines(resolution="50m", lw=0.5) ax.plot( ds_GLOB.LON, ds_GLOB.lat, ".", color="0.5", ms=0.5, transform=ccrs.PlateCarree() ) ax.text(52, 43.5, "Caspian Sea", transform=ccrs.PlateCarree()) ax.set_title("Polygon interiors are unmasked"); ###Output _____no_output_____
object_tracking_from_video.ipynb
###Markdown object_tracking_from_videoIn this notebook we apply object tracking using SORT to determine the motion of people in a video, and count the number of people crossing a line. The general idea and the video are taken from https://www.pyimagesearch.com/2018/08/13/opencv-people-counter/In the video there are 6 people in total - 4 walk north-south and 2 south-north ###Code #!pip install -r requirements.txt from pathlib import Path import pandas as pd import cv2 from IPython.display import Image import deepstack.core as ds import json import numpy as np from sort.sort import * %matplotlib inline videos = list(Path("video/").rglob("*.mp4")) video_path = str(videos[0]) # use video 0 print(video_path) video_path ###Output _____no_output_____ ###Markdown Read the frames and write to a temp folder (contents are gitignored to keep repo small) ###Code FRAME_SAMPLING = 5 # video is at 30 FPS so down sample to limit compute vidcap = cv2.VideoCapture(video_path) success, frame = vidcap.read() count = 0 cv2.imwrite("tmp/frame%d.jpg" % count, frame) # save frame as JPEG file frames = list(Path("tmp/").rglob("*.jpg")) frame_path = str(frames[0]) print(frame_path) Image(frame_path) ###Output _____no_output_____ ###Markdown Now iterate over all frames, saving if they are at the FRAME_SAMPLING. 
Use a list of dictionaries to keep info about the frames ###Code results = [] true_count = 0 # in the original video frame_count = 0 # in the sampled images vidcap = cv2.VideoCapture(video_path) success, frame = vidcap.read() while success: success, frame = vidcap.read() frame_path = f"tmp/frame_{frame_count}.jpg" if true_count % FRAME_SAMPLING == 0: cv2.imwrite(frame_path, frame) # save frame as JPEG file frame_info = {} frame_info['true_count'] = true_count frame_info['frame_count'] = frame_count frame_info['frame_path'] = frame_path results.append(frame_info) frame_count += 1 true_count += 1 print(results[0]) print(len(results)) ###Output {'true_count': 0, 'frame_count': 0, 'frame_path': 'tmp/frame_0.jpg'} 107 ###Markdown Process frames to extract object bounding boxes ###Code # deepstack credentials IP_ADDRESS = 'localhost' PORT = 80 API_KEY = "" # if you have not set an api_key, just pass an empty string dsobject = ds.DeepstackObject(IP_ADDRESS, PORT, API_KEY) %%time for i, frame_info in enumerate(results): image = str(frame_info['frame_path']) try: with open(image, 'rb') as image_bytes: predictions = dsobject.detect(image_bytes) frame_info['predictions'] = predictions frame_info['persons'] = len([p for p in predictions if p['label']=='person']) print(f"Processing image number {i} : {image} : {frame_info['persons']} persons") except Exception as exc: print(exc) ###Output Processing image number 0 : tmp/frame_0.jpg : 0 persons Processing image number 1 : tmp/frame_1.jpg : 0 persons Processing image number 2 : tmp/frame_2.jpg : 0 persons Processing image number 3 : tmp/frame_3.jpg : 0 persons Processing image number 4 : tmp/frame_4.jpg : 0 persons Processing image number 5 : tmp/frame_5.jpg : 0 persons Processing image number 6 : tmp/frame_6.jpg : 0 persons Processing image number 7 : tmp/frame_7.jpg : 0 persons Processing image number 8 : tmp/frame_8.jpg : 0 persons Processing image number 9 : tmp/frame_9.jpg : 1 persons Processing image number 10 : 
tmp/frame_10.jpg : 1 persons Processing image number 11 : tmp/frame_11.jpg : 0 persons Processing image number 12 : tmp/frame_12.jpg : 0 persons Processing image number 13 : tmp/frame_13.jpg : 0 persons Processing image number 14 : tmp/frame_14.jpg : 0 persons Processing image number 15 : tmp/frame_15.jpg : 0 persons Processing image number 16 : tmp/frame_16.jpg : 0 persons Processing image number 17 : tmp/frame_17.jpg : 0 persons Processing image number 18 : tmp/frame_18.jpg : 0 persons Processing image number 19 : tmp/frame_19.jpg : 0 persons Processing image number 20 : tmp/frame_20.jpg : 3 persons Processing image number 21 : tmp/frame_21.jpg : 1 persons Processing image number 22 : tmp/frame_22.jpg : 0 persons Processing image number 23 : tmp/frame_23.jpg : 1 persons Processing image number 24 : tmp/frame_24.jpg : 2 persons Processing image number 25 : tmp/frame_25.jpg : 1 persons Processing image number 26 : tmp/frame_26.jpg : 2 persons Processing image number 27 : tmp/frame_27.jpg : 1 persons Processing image number 28 : tmp/frame_28.jpg : 1 persons Processing image number 29 : tmp/frame_29.jpg : 0 persons Processing image number 30 : tmp/frame_30.jpg : 1 persons Processing image number 31 : tmp/frame_31.jpg : 1 persons Processing image number 32 : tmp/frame_32.jpg : 2 persons Processing image number 33 : tmp/frame_33.jpg : 1 persons Processing image number 34 : tmp/frame_34.jpg : 1 persons Processing image number 35 : tmp/frame_35.jpg : 1 persons Processing image number 36 : tmp/frame_36.jpg : 0 persons Processing image number 37 : tmp/frame_37.jpg : 0 persons Processing image number 38 : tmp/frame_38.jpg : 0 persons Processing image number 39 : tmp/frame_39.jpg : 0 persons Processing image number 40 : tmp/frame_40.jpg : 0 persons Processing image number 41 : tmp/frame_41.jpg : 0 persons Processing image number 42 : tmp/frame_42.jpg : 0 persons Processing image number 43 : tmp/frame_43.jpg : 0 persons Processing image number 44 : tmp/frame_44.jpg : 0 
persons Processing image number 45 : tmp/frame_45.jpg : 0 persons Processing image number 46 : tmp/frame_46.jpg : 0 persons Processing image number 47 : tmp/frame_47.jpg : 0 persons Processing image number 48 : tmp/frame_48.jpg : 0 persons Processing image number 49 : tmp/frame_49.jpg : 0 persons Processing image number 50 : tmp/frame_50.jpg : 0 persons Processing image number 51 : tmp/frame_51.jpg : 0 persons Processing image number 52 : tmp/frame_52.jpg : 0 persons Processing image number 53 : tmp/frame_53.jpg : 0 persons Processing image number 54 : tmp/frame_54.jpg : 0 persons Processing image number 55 : tmp/frame_55.jpg : 0 persons Processing image number 56 : tmp/frame_56.jpg : 0 persons Processing image number 57 : tmp/frame_57.jpg : 0 persons Processing image number 58 : tmp/frame_58.jpg : 0 persons Processing image number 59 : tmp/frame_59.jpg : 0 persons Processing image number 60 : tmp/frame_60.jpg : 0 persons Processing image number 61 : tmp/frame_61.jpg : 0 persons Processing image number 62 : tmp/frame_62.jpg : 0 persons Processing image number 63 : tmp/frame_63.jpg : 0 persons Processing image number 64 : tmp/frame_64.jpg : 0 persons Processing image number 65 : tmp/frame_65.jpg : 0 persons Processing image number 66 : tmp/frame_66.jpg : 1 persons Processing image number 67 : tmp/frame_67.jpg : 2 persons Processing image number 68 : tmp/frame_68.jpg : 2 persons Processing image number 69 : tmp/frame_69.jpg : 2 persons Processing image number 70 : tmp/frame_70.jpg : 2 persons Processing image number 71 : tmp/frame_71.jpg : 0 persons Processing image number 72 : tmp/frame_72.jpg : 1 persons Processing image number 73 : tmp/frame_73.jpg : 0 persons Processing image number 74 : tmp/frame_74.jpg : 0 persons Processing image number 75 : tmp/frame_75.jpg : 0 persons Processing image number 76 : tmp/frame_76.jpg : 0 persons Processing image number 77 : tmp/frame_77.jpg : 0 persons Processing image number 78 : tmp/frame_78.jpg : 0 persons Processing image 
number 79 : tmp/frame_79.jpg : 2 persons Processing image number 80 : tmp/frame_80.jpg : 0 persons Processing image number 81 : tmp/frame_81.jpg : 0 persons Processing image number 82 : tmp/frame_82.jpg : 0 persons Processing image number 83 : tmp/frame_83.jpg : 0 persons Processing image number 84 : tmp/frame_84.jpg : 0 persons Processing image number 85 : tmp/frame_85.jpg : 0 persons Processing image number 86 : tmp/frame_86.jpg : 1 persons Processing image number 87 : tmp/frame_87.jpg : 0 persons Processing image number 88 : tmp/frame_88.jpg : 0 persons Processing image number 89 : tmp/frame_89.jpg : 0 persons Processing image number 90 : tmp/frame_90.jpg : 0 persons Processing image number 91 : tmp/frame_91.jpg : 0 persons Processing image number 92 : tmp/frame_92.jpg : 1 persons Processing image number 93 : tmp/frame_93.jpg : 0 persons Processing image number 94 : tmp/frame_94.jpg : 0 persons Processing image number 95 : tmp/frame_95.jpg : 0 persons Processing image number 96 : tmp/frame_96.jpg : 0 persons Processing image number 97 : tmp/frame_97.jpg : 0 persons Processing image number 98 : tmp/frame_98.jpg : 1 persons Processing image number 99 : tmp/frame_99.jpg : 0 persons Processing image number 100 : tmp/frame_100.jpg : 0 persons Processing image number 101 : tmp/frame_101.jpg : 0 persons Processing image number 102 : tmp/frame_102.jpg : 0 persons Processing image number 103 : tmp/frame_103.jpg : 0 persons Processing image number 104 : tmp/frame_104.jpg : 0 persons Processing image number 105 : tmp/frame_105.jpg : 0 persons Processing image number 106 : tmp/frame_106.jpg : 0 persons CPU times: user 478 ms, sys: 126 ms, total: 604 ms Wall time: 47.5 s ###Markdown Write results to json for safekeeping ###Code with open('results.json', 'w') as fp: json.dump(results, fp) ###Output _____no_output_____ ###Markdown Extract the person count and visualise ###Code persons = {p['frame_count']:p['persons'] for p in results} 
pd.Series(persons).plot.bar(figsize=(15,5)).set_ylabel('Person count') ###Output _____no_output_____ ###Markdown This looks approximately correct but appears to be poor at identifying the individuals. We could use more frames to improve our chance of detection as maybe the sampled frames are not the best Track with sort* https://towardsdatascience.com/detect-and-track-baseball-using-detectron2-and-sort-6dd92a46e6f2* https://github.com/abewley/sortNote I have just placed the contents of the sort repo in the folder `sort` ###Code # !pip install -r sort/requirements.txt ###Output _____no_output_____ ###Markdown Now we iterate over the frames, and for each frame pass in the bounding box coordinates, and get back the object tracked ID. * Sort expects a numpy array of detections in the format of `[[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],…]`* Use `np.empty((0, 5))` for frames without detectionsWrite a helper to extract this info from result[10] ###Code test_prediction = [{'confidence': 0.97203445, 'label': 'person', 'y_min': 0, 'x_min': 104, 'y_max': 108, 'x_max': 162}, {'confidence': 0.8538234, 'label': 'person', 'y_min': 2, 'x_min': 185, 'y_max': 84, 'x_max': 235}] test_prediction
def get_detections(prediction: list) -> np.ndarray:
    """Convert DeepStack object predictions to the SORT detection format.

    SORT expects an (N, 5) numpy array: [[x1, y1, x2, y2, score], ...].
    Only 'person' detections are kept; a frame with no persons yields an
    empty (0, 5) array, which is what SORT requires for detection-free
    frames.
    """
    people = [p for p in prediction if p['label'] == 'person']
    if not people:
        return np.empty((0, 5))
    # Build the (N, 5) array in one pass instead of appending row by row.
    return np.array(
        [[p['x_min'], p['y_min'], p['x_max'], p['y_max'], p['confidence']]
         for p in people]
    )
get_detections(test_prediction) #create instance of SORT. 
Note that rerunning this cell increments the track id each time mot_tracker = Sort() for i, frame_info in enumerate(results): image = str(frame_info['frame_path']) detections = get_detections(frame_info['predictions']) track_bbs_ids = mot_tracker.update(detections) print(i, track_bbs_ids) ###Output 0 [] 1 [] 2 [] 3 [] 4 [] 5 [] 6 [] 7 [] 8 [] 9 [] 10 [] 11 [] 12 [] 13 [] 14 [] 15 [] 16 [] 17 [] 18 [] 19 [] 20 [] 21 [] 22 [] 23 [] 24 [] 25 [[183.96008252 44.48918636 253.9329312 165.70161142 2. ]] 26 [[187.51927306 63.42292068 260.85094848 187.80326276 2. ]] 27 [[187.91299682 86.18878162 264.61184814 215.06813559 2. ]] 28 [[184.38561766 105.32894448 261.83471226 226.89267335 2. ]] 29 [] 30 [] 31 [] 32 [] 33 [] 34 [[194.44007995 232.20141163 266.79182515 305.72407341 7. ]] 35 [[195.91589067 250.91134354 262.48308501 307.04828682 7. ]] 36 [] 37 [] 38 [] 39 [] 40 [] 41 [] 42 [] 43 [] 44 [] 45 [] 46 [] 47 [] 48 [] 49 [] 50 [] 51 [] 52 [] 53 [] 54 [] 55 [] 56 [] 57 [] 58 [] 59 [] 60 [] 61 [] 62 [] 63 [] 64 [] 65 [] 66 [] 67 [] 68 [] 69 [[222.88807956 194.25703272 310.70742558 287.16672684 8. ]] 70 [[303.4908146 189.9329321 373.6862038 269.61225669 9. ] [220.52678672 170.09929987 310.26744733 275.23143929 8. ]] 71 [] 72 [] 73 [] 74 [] 75 [] 76 [] 77 [] 78 [] 79 [] 80 [] 81 [] 82 [] 83 [] 84 [] 85 [] 86 [] 87 [] 88 [] 89 [] 90 [] 91 [] 92 [] 93 [] 94 [] 95 [] 96 [] 97 [] 98 [] 99 [] 100 [] 101 [] 102 [] 103 [] 104 [] 105 [] 106 []
L10_LDA x PCA/L10_linear_discriminant_analysis.ipynb
###Markdown Linear Discriminant Analysis from scratch Introduction Linear Discriminant Analysis (LDA) is most commonly used as a dimensionality reduction technique in the pre-processing step for pattern-classification and machine learning applications. The goal is to project a dataset onto a lower-dimensional space with good class-separability in order to avoid overfitting ("curse of dimensionality") and also reduce computational costs.Ronald A. Fisher formulated the *Linear Discriminant* in 1936 ([The Use of Multiple Measurements in Taxonomic Problems](http://onlinelibrary.wiley.com/doi/10.1111/j.1469-1809.1936.tb02137.x/abstract)), and it also has some practical uses as a classifier. The original Linear discriminant was described for a 2-class problem, and it was then later generalized as "multi-class Linear Discriminant Analysis" or "Multiple Discriminant Analysis" by C. R. Rao in 1948 ([The utilization of multiple measurements in problems of biological classification](http://www.jstor.org/stable/2983775)) **The general LDA approach is very similar to a Principal Component Analysis, but in addition to finding the component axes that maximize the variance of our data (PCA), we are additionally interested in the axes that maximize the separation between multiple classes (LDA).** So, in a nutshell, often the goal of an LDA is to project a feature space (a dataset of n-dimensional samples) onto a smaller subspace $k$ (where $k \leq n-1$) while maintaining the class-discriminatory information. In general, dimensionality reduction does not only help reducing computational costs for a given classification task, but it can also be helpful to avoid overfitting by minimizing the error in parameter estimation ("curse of dimensionality"). Principal Component Analysis vs. Linear Discriminant Analysis Both **Linear Discriminant Analysis (LDA)** and **Principal Component Analysis (PCA)** are linear transformation techniques that are commonly used for dimensionality reduction. 
**PCA can be described as an "unsupervised"** algorithm, since it "ignores" class labels and its goal is to find the directions (the so-called principal components) that maximize the variance in a dataset.In contrast to PCA, **LDA is "supervised"** and computes the directions ("linear discriminants") that will represent the axes that maximize the separation between multiple classes.Although it might sound intuitive that LDA is superior to PCA for a multi-class classification task where the class labels are known, this might not always be the case. For example, comparisons between classification accuracies for image recognition after using PCA or LDA show that PCA tends to outperform LDA if the number of samples per class is relatively small ([PCA vs. LDA](http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=908974), A.M. Martinez et al., 2001).In practice, it is also not uncommon to use both LDA and PCA in combination: E.g., PCA for dimensionality reduction followed by an LDA. What is a "good" feature subspace? Let's assume that our goal is to reduce the dimensions of a $d$-dimensional dataset by projecting it onto a $(k)$-dimensional subspace (where $k\;<\;d$). So, how do we know what size we should choose for $k$ ($k$ = the number of dimensions of the new feature subspace), and how do we know if we have a feature space that represents our data "well"? Later, we will compute eigenvectors (the components) from our data set and collect them in so-called scatter-matrices (i.e., the in-between-class scatter matrix and within-class scatter matrix). Each of these eigenvectors is associated with an eigenvalue, which tells us about the "length" or "magnitude" of the eigenvectors. If we would observe that all eigenvalues have a similar magnitude, then this may be a good indicator that our data is already projected on a "good" feature space. 
And in the other scenario, if some of the eigenvalues are much much larger than others, we might be interested in keeping only those eigenvectors with the highest eigenvalues, since they contain more information about our data distribution. Vice versa, eigenvalues that are close to 0 are less informative and we might consider dropping those for constructing the new feature subspace. Summarizing the LDA approach in 6 steps Listed below are the 6 general steps for performing a linear discriminant analysis; we will explore them in more detail in the following sections.1. Center points2. Compute the $d$-dimensional mean vectors for the different classes from the dataset.3. Compute the scatter matrices (in-between-class and within-class scatter matrix).4. Compute the eigenvectors ($\pmb e_1, \; \pmb e_2, \; ..., \; \pmb e_d$) and corresponding eigenvalues ($\pmb \lambda_1, \; \pmb \lambda_2, \; ..., \; \pmb \lambda_d$) for the scatter matrices.5. Sort the eigenvectors by decreasing eigenvalues and choose $k$ eigenvectors with the largest eigenvalues to form a $d \times k$ dimensional matrix $\pmb W\;$ (where every column represents an eigenvector).6. Use this $d \times k$ eigenvector matrix to transform the samples onto the new subspace. This can be summarized by the matrix multiplication $\pmb Y = \pmb X \times \pmb W$ (where $\pmb X$ is a $n \times d$-dimensional matrix representing the $n$ samples, and $\pmb y$ are the transformed $n \times k$-dimensional samples in the new subspace). Preparing the sample data set Open dataset ###Code import seaborn as sns df = sns.load_dataset("iris") ###Output _____no_output_____ ###Markdown Exploratory data analysis (EDA) ###Code sns.pairplot(df, hue="species") ###Output _____no_output_____ ###Markdown From just looking at these simple graphical representations of the features, we can already tell that the petal lengths and widths are likely better suited as potential features two separate between the three flower classes. 
In practice, instead of reducing the dimensionality via a projection (here: LDA), a good alternative would be a feature selection technique. For low-dimensional datasets like Iris, a glance at those histograms would already be very informative. Another simple, but very useful technique would be to use feature selection algorithms, which I have described in more detail in future lectures Normality assumptions It should be mentioned that LDA assumes normal distributed data, features that are statistically independent, and identical covariance matrices for every class. However, this only applies for LDA as classifier and LDA for dimensionality reduction can also work reasonably well if those assumptions are violated. And even for classification tasks LDA seems can be quite robust to the distribution of the data: > "linear discriminant analysis frequently achieves good performances in> the tasks of face and object recognition, even though the assumptions> of common covariance matrix among groups and normality are often> violated (Duda, et al., 2001)" (Tao Li, et al., 2006).Tao Li, Shenghuo Zhu, and Mitsunori Ogihara. โ€œ[Using Discriminant Analysis for Multi-Class Classification: An Experimental Investigation](http://link.springer.com/article/10.1007%2Fs10115-006-0013-y).โ€ Knowledge and Information Systems 10, no. 4 (2006): 453โ€“72.) Duda, Richard O, Peter E Hart, and David G Stork. 2001. Pattern Classification. New York: Wiley. Preprocessing ###Code df.head() X = df[['sepal_length','sepal_width','petal_length','petal_width']].values y = df['species'].values enc = LabelEncoder() enc.fit(y) y = enc.transform(y) ###Output _____no_output_____ ###Markdown LDA in 6 steps After we went through several preparation steps, our data is finally ready for the actual LDA. In practice, LDA for dimensionality reduction would be just another preprocessing step for a typical machine learning or pattern classification task. 
Step1: Center points ###Code X = X - X.mean(axis=0) ###Output _____no_output_____ ###Markdown Step 2: Computing the d-dimensional mean vectors In this first step, we will start off with a simple computation of the mean vectors $\boldsymbol{\mu}_i$, $(i = 1,2,3)$ of the 3 different flower classes: $\boldsymbol{\mu}_i = \begin{bmatrix} \mu_{\omega_i (\text{sepal length)}}\\ \mu_{\omega_i (\text{sepal width})}\\ \mu_{\omega_i (\text{petal length)}}\\\mu_{\omega_i (\text{petal width})}\\\end{bmatrix} \; , \quad \text{with} \quad i = 1,2,3$ ###Code np.set_printoptions(precision=4, suppress=True) mean_vectors = [] for cl in range(0,3): mean_vectors.append(np.mean(X[y==cl], axis=0)) print('Mean Vector class %s: %s\n' %(cl, mean_vectors[cl])) ###Output Mean Vector class 0: [-0.8373 0.3707 -2.296 -0.9533] Mean Vector class 1: [ 0.0927 -0.2873 0.502 0.1267] Mean Vector class 2: [ 0.7447 -0.0833 1.794 0.8267] ###Markdown Step 3: Computing the Scatter Matrices Now, we will compute the two *4x4*-dimensional matrices: The within-class and the between-class scatter matrix. 
3.1 Within-class scatter matrix $S_W$ The **within-class scatter** matrix $S_W$ is computed by the following equation: $S_W = \sum\limits_{i=1}^{c} S_i$where $S_i = \sum\limits_{\pmb x \in D_i}^n (\pmb x - \pmb \mu_i)\;(\pmb x - \pmb \mu_i)^T$ (scatter matrix for every class) and $\pmb \mu_i$ is the mean vector $\pmb \mu_i = \frac{1}{n_i} \sum\limits_{\pmb x \in D_i}^n \; \pmb x_k$ ###Code S_W = np.zeros((4,4)) for cl,mv in zip(range(0,3), mean_vectors): class_sc_mat = np.zeros((4,4)) # scatter matrix for every class for row in X[y == cl]: row, mv = row.reshape(4,1), mv.reshape(4,1) # make column vectors class_sc_mat += (row-mv).dot((row-mv).T) S_W += class_sc_mat # sum class scatter matrices print('within-class Scatter Matrix:\n', S_W) ###Output within-class Scatter Matrix: [[38.9562 13.63 24.6246 5.645 ] [13.63 16.962 8.1208 4.8084] [24.6246 8.1208 27.2226 6.2718] [ 5.645 4.8084 6.2718 6.1566]] ###Markdown 3.2 Between-class scatter matrix $S_B$ The **between-class scatter** matrix $S_B$ is computed by the following equation: $S_B = \sum\limits_{i=1}^{c} N_{i} (\pmb \mu_i - \pmb \mu) (\pmb \mu_i - \pmb \mu)^T$where $\pmb \mu$ is the overall mean, and $\pmb \mu_{i}$ and $N_{i}$ are the sample mean and sizes of the respective classes. 
###Code overall_mean = np.mean(X, axis=0) S_B = np.zeros((4,4)) for i,mean_vec in enumerate(mean_vectors): n = X[y==i,:].shape[0] mean_vec = mean_vec.reshape(4,1) # make column vector overall_mean = overall_mean.reshape(4,1) # make column vector S_B += n * (mean_vec - overall_mean).dot((mean_vec - overall_mean).T) print('between-class Scatter Matrix:\n', S_B) ###Output between-class Scatter Matrix: [[ 63.2121 -19.9527 165.2484 71.2793] [-19.9527 11.3449 -57.2396 -22.9327] [165.2484 -57.2396 437.1028 186.774 ] [ 71.2793 -22.9327 186.774 80.4133]] ###Markdown Step 4: Solving the generalized eigenvalue problem for the matrix $S_{W}^{-1}S_B$ Next, we will solve the generalized eigenvalue problem for the matrix $S_{W}^{-1}S_B$ to obtain the linear discriminants. ###Code eig_vals, eig_vecs = np.linalg.eig(np.linalg.inv(S_W).dot(S_B)) for i in range(len(eig_vals)): #eigvec_sc = eig_vecs[:,i].reshape(4,1) print('\nEigenvector {}: \n{}'.format(i+1, eig_vecs[:,i].real)) print('Eigenvalue {:}: {:.2e}'.format(i+1, eig_vals[i].real)) ###Output Eigenvector 1: [-0.2087 -0.3862 0.554 0.7074] Eigenvalue 1: 3.22e+01 Eigenvector 2: [-0.0065 -0.5866 0.2526 -0.7695] Eigenvalue 2: 2.85e-01 Eigenvector 3: [ 0.8825 -0.2639 -0.2357 -0.31 ] Eigenvalue 3: 4.76e-15 Eigenvector 4: [-0.2844 0.4123 0.4716 -0.7258] Eigenvalue 4: -8.19e-15 ###Markdown After this decomposition of our square matrix into eigenvectors and eigenvalues, let us briefly recapitulate how we can interpret those results. As we remember from our first linear algebra class in high school or college, both eigenvectors and eigenvalues are providing us with information about the distortion of a linear transformation: The eigenvectors are basically the direction of this distortion, and the eigenvalues are the scaling factor for the eigenvectors that describing the magnitude of the distortion. 
If we are performing the LDA for dimensionality reduction, the eigenvectors are important since they will form the new axes of our new feature subspace; the associated eigenvalues are of particular interest since they will tell us how "informative" the new "axes" are. Let us briefly double-check our calculation and talk more about the eigenvalues in the next section. Checking the eigenvector-eigenvalue calculation A quick check that the eigenvector-eigenvalue calculation is correct and satisfy the equation:$\pmb A\pmb{v} = \lambda\pmb{v}$ where $\pmb A = S_{W}^{-1}S_B\\\pmb{v} = \; \text{Eigenvector}\\\lambda = \; \text{Eigenvalue}$ ###Code for i in range(len(eig_vals)): eigv = eig_vecs[:,i].reshape(4,1) np.testing.assert_array_almost_equal(np.linalg.inv(S_W).dot(S_B).dot(eigv), eig_vals[i] * eigv, decimal=6, err_msg='', verbose=True) print('ok') ###Output ok ###Markdown Step 5: Selecting linear discriminants for the new feature subspace 5.1. Sorting the eigenvectors by decreasing eigenvalues Remember from the introduction that we are not only interested in merely projecting the data into a subspace that improves the class separability, but also reduces the dimensionality of our feature space, (where the eigenvectors will form the axes of this new feature subspace). However, the eigenvectors only define the directions of the new axis, since they have all the same unit length 1. So, in order to decide which eigenvector(s) we want to drop for our lower-dimensional subspace, we have to take a look at the corresponding eigenvalues of the eigenvectors. Roughly speaking, the eigenvectors with the lowest eigenvalues bear the least information about the distribution of the data, and those are the ones we want to drop. The common approach is to rank the eigenvectors from highest to lowest corresponding eigenvalue and choose the top $k$ eigenvectors. 
###Code i = np.flip(np.argsort(eig_vals)) eig_vals=np.abs(eig_vals[i]) eig_vecs=eig_vecs[:,i] eig_vals print('Variance explained:\n') eigv_sum = np.sum(eig_vals) var_expl = eig_vals/eigv_sum var_expl ###Output Variance explained: ###Markdown If we take a look at the eigenvalues, we can already see that 2 eigenvalues are close to 0. The reason why these are close to 0 is not that they are not informative but it's a floating-point imprecision. In fact, these two last eigenvalues should be exactly zero: In LDA, the number of linear discriminants is at most $cโˆ’1$ where $c$ is the number of class labels, since the in-between scatter matrix $S_B$ is the sum of $c$ matrices with rank 1 or less. Note that in the rare case of perfect collinearity (all aligned sample points fall on a straight line), the covariance matrix would have rank one, which would result in only one eigenvector with a nonzero eigenvalue. The first eigenpair is by far the most informative one, and we won't loose much information if we would form a 1D-feature spaced based on this eigenpair. 5.2. Choosing *k* eigenvectors with the largest eigenvalues After sorting the eigenpairs by decreasing eigenvalues, it is now time to construct our $d \times k$-dimensional eigenvector matrix $\pmb W$ (here $4 \times 2$: based on the 2 most informative eigenpairs) and thereby reducing the initial 4-dimensional feature space into a 2-dimensional feature subspace. ###Code W = eig_vecs[:,0:2] W ###Output _____no_output_____ ###Markdown Step 6: Transforming the samples onto the new subspace In the last step, we use the $4 \times 2$-dimensional matrix $\pmb W$ that we just computed to transform our samples onto the new subspace via the equation $\pmb Y = \pmb X \times \pmb W $.(where $\pmb X$ is a $n \times d$-dimensional matrix representing the $n$ samples, and $\pmb Y$ are the transformed $n \times k$-dimensional samples in the new subspace). 
###Code X_lda = X.dot(W) np.shape(X_lda) ###Output _____no_output_____ ###Markdown The scatter plot above represents our new feature subspace that we constructed via LDA. We can see that the first linear discriminant "LD1" separates the classes quite nicely. However, the second discriminant, "LD2", does not add much valuable information, which we've already concluded when we looked at the ranked eigenvalues is step 4. ###Code plt.scatter(X_lda[:,0], X_lda[:,1], c=y, cmap='viridis') ###Output _____no_output_____ ###Markdown LDA via scikit-learn Now, after we have seen how an Linear Discriminant Analysis works using a step-by-step approach, there is also a more convenient way to achive the same via the `LDA` class implemented in the [`scikit-learn`](http://scikit-learn.org/stable/) machine learning library. ###Code from sklearn.discriminant_analysis import LinearDiscriminantAnalysis # LDA lda = LinearDiscriminantAnalysis(n_components=2) lda.fit(X, y) X_lda = lda.transform(X) plt.scatter(X_lda[:,0], X_lda[:,1], c=y, cmap='viridis') ###Output _____no_output_____ ###Markdown A comparison of PCA and LDA In order to compare the feature subspace that we obtained via the Linear Discriminant Analysis, we will use the `PCA` class from the `scikit-learn` machine-learning library. The documentation can be found here: [https://scikit-learn.org/stable/auto_examples/decomposition/plot_pca_vs_lda.htmlsphx-glr-auto-examples-decomposition-plot-pca-vs-lda-py](https://scikit-learn.org/stable/auto_examples/decomposition/plot_pca_vs_lda.htmlsphx-glr-auto-examples-decomposition-plot-pca-vs-lda-py). For our convenience, we can directly specify to how many components we want to retain in our input dataset via the `n_components` parameter. ###Code from sklearn.decomposition import PCA # PCA pca = PCA(n_components=2) pca.fit(X, y) X_pca = pca.transform(X) plt.scatter(X_pca[:,0], X_pca[:,1], c=y, cmap='viridis') ###Output _____no_output_____
finlab/u12_svm.ipynb
###Markdown Support Vector Machine (SVM) features ###Code import finlab.ml as ml dataset = ml.fundamental_features() dataset.head() dataset.columns features = ['R103_ROE็จ…ๅพŒ', 'R402_็‡Ÿๆฅญๆฏ›ๅˆฉๆˆ้•ท็އ'] dataset = dataset[features].dropna(how='any') dataset.head() ###Output _____no_output_____ ###Markdown add prediction ###Code ml.add_profit_prediction(dataset) dataset.head() %matplotlib inline dataset.plot.scatter(features[0], features[1]) ###Output _____no_output_____ ###Markdown remove outliers ###Code def is_valid(feature, nstd): ub = feature.mean() + nstd * feature.std() lb = feature.mean() - nstd * feature.std() return (feature > lb) & (feature <ub) valid = is_valid(dataset['R103_ROE็จ…ๅพŒ'], 2) & is_valid(dataset['R402_็‡Ÿๆฅญๆฏ›ๅˆฉๆˆ้•ท็އ'], 0.05) dataset_rmoutliers = dataset[valid].dropna() dataset_rmoutliers['R103_ROE็จ…ๅพŒ'].hist(bins=100) #dataset_rmoutliers['R402_็‡Ÿๆฅญๆฏ›ๅˆฉๆˆ้•ท็އ'].hist(bins=100) ###Output _____no_output_____ ###Markdown Scale features ###Code import pandas as pd import sklearn.preprocessing as preprocessing dataset_scaled = pd.DataFrame(preprocessing.scale(dataset_rmoutliers), index=dataset_rmoutliers.index, columns=dataset_rmoutliers.columns) dataset_scaled.head() dataset_scaled['R103_ROE็จ…ๅพŒ'].hist(bins=100) dataset_scaled['R402_็‡Ÿๆฅญๆฏ›ๅˆฉๆˆ้•ท็އ'].hist(bins=100, alpha=0.5) dataset_scaled['return'] = dataset_rmoutliers['return'] ###Output _____no_output_____ ###Markdown Training ###Code from sklearn.model_selection import train_test_split dataset_train, dataset_test = train_test_split(dataset_scaled, test_size=0.1, random_state=0) from sklearn.svm import SVC cf = SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, decision_function_shape='ovr', degree=3, gamma='auto_deprecated', kernel='linear', max_iter=-1, probability=False, random_state=None, shrinking=True, tol=0.001, verbose=False) cf.fit(dataset_train[features], dataset_train['return'] > dataset_train['return'].quantile(0.5)) from mlxtend.plotting import 
plot_decision_regions features_plot = dataset_test[features].values labels_plot = (dataset_test['return'] > dataset_test['return'].quantile(0.5)).astype(int).values plot_decision_regions(features_plot, labels_plot, cf) ###Output _____no_output_____ ###Markdown backtest ###Code history = dataset_test.copy() history['svm prediction'] = cf.predict(dataset_test[features]) history = history.reset_index() dates = sorted(list(set(history['date']))) seasonal_returns1 = [] seasonal_returns2 = [] for date in dates: current_stocks = history[history['date'] == date] buy_stocks = current_stocks[current_stocks['svm prediction'] == True] sell_stocks = current_stocks[current_stocks['svm prediction'] == False] seasonal_return1 = buy_stocks['return'].mean() seasonal_returns1.append(seasonal_return1) seasonal_return2 = sell_stocks['return'].mean() seasonal_returns2.append(seasonal_return2) import matplotlib.pyplot as plt plt.style.use("ggplot") pd.Series(seasonal_returns1, index=dates).cumprod().plot(color='red') pd.Series(seasonal_returns2, index=dates).cumprod().plot(color='blue') ###Output _____no_output_____
2021/Cviceni 2.ipynb
###Markdown Hledรกnรญ koล™enลฏNaprogramujte hledรกnรญ koล™enลฏ metodou pลฏlenรญ intervalu.Najdฤ›te koล™en sin(x) mezi 3 a 4 metodou pลฏlenรญ intervalu ###Code a, b = 3, 4 f = np.sin def bisect(f, a, b, maxiter=53): # ukol: definujte tฤ›lo tรฉto funkce if np.sign(f(a))*np.sign(f(b)) >= 0: raise(ValueError("Function sign must differ at a and b")) for i in range(maxiter): m = (a+b)/2. fm = f(m) if m in [a, b] or fm == 0: # floating point tolerance reached or exact solution found return m if fm*np.sign(f(a)) < 0: b = m elif fm*np.sign(f(b)) < 0: a = m return m bisect(f, 3, 4) ###Output _____no_output_____ ###Markdown Newtonova metoda ###Code def newton(f, df, a): for i in range(10): a_new = a - f(a)/df(a) if a_new == a: return a a = a_new df = np.cos newton(f, df, 4.8), newton(f, df, 4.) ###Output _____no_output_____ ###Markdown Minimalizace รškol ฤ. 1: naprogramujte hledรกnรญ minima metoda zlatรฉho ล™ezu (dฤ›lenรญ intervalu).Najdฤ›te minimum funkce `cos(x)` v intervalu `[2, 4]`. Sledujte rychlost konvergence - tedy pล™esnost urฤenรญ polohy minima `xmin` a funkฤnรญ hodnoty `cos(xmin)` v minimu v zรกvislosti na poฤtu iteracรญ. S jakou pล™esnostรญ lze tyto parametry urฤit? Potรฉ najdฤ›te minimum funkce `1 + (x-0.1)**4` na intervalu `[-1, 1]` a obdobnรฝm zpลฏsobem zhodnoลฅte pล™enost urฤenรญ minima. ###Code def golden_min(f, a, b, tol=1e-5): iphi = 2/(np.sqrt(5) + 1) # 0.618... 
approximations = [[a, b]] c = b - (b - a) * iphi d = a + (b - a) * iphi while np.abs(b - a) > tol: if f(c) < f(d): b = d else: a = c approximations.append([a,b]) c = b - (b - a) * iphi d = a + (b - a) * iphi return (b + a) / 2, np.array(approximations) f = np.cos xmin, approx = golden_min(f, 3, 4, tol=1e-14) plt.plot(np.arange(approx.shape[0]), approx[:,0]) plt.plot(np.arange(approx.shape[0]), approx[:,1]) plt.plot(np.arange(approx.shape[0]), np.abs(approx[:,0]-np.pi)) plt.plot(np.arange(approx.shape[0]), np.abs(approx[:,1]-np.pi)) plt.plot(np.arange(approx.shape[0]), np.abs(f(approx[:,0])+1), "--") plt.plot(np.arange(approx.shape[0]), np.abs(f(approx[:,1])+1), "--") plt.grid() plt.gca().set_yscale("log") xmin - np.pi # ~sqrt(eps) f = lambda x: (x-0.1)**4 true_min = 0.1 x = np.linspace(-1, 1) plt.plot(x, f(x)) xmin, approx = golden_min(f, -1, 1, tol=1e-14) plt.plot(np.arange(approx.shape[0]), np.abs(approx[:,0]-true_min)) plt.plot(np.arange(approx.shape[0]), np.abs(approx[:,1]-true_min)) plt.plot(np.arange(approx.shape[0]), np.abs(f(approx[:,0])-f(true_min)), "--") plt.plot(np.arange(approx.shape[0]), np.abs(f(approx[:,1])-f(true_min)), "--") plt.grid() plt.gca().set_yscale("log") xmin Newtonova metoda ###Output _____no_output_____ ###Markdown Ukรกzka Newtonovy metody ###Code def newton_min(f, df, ddf, a): for i in range(10): a_new = a - df(a)/ddf(a) if a_new == a: if ddf(a) > 0: return a else: raise(RuntimeError("Method did not converge to minimum")) a = a_new def f(x): return x**2 + x def df(x): return 2*x + 1 def ddf(x): return 2 newton_min(f, df, ddf, 1) x = np.linspace(-2, 2) plt.plot(x, f(x)) plt.ylim(ymax=2) plt.grid() ###Output _____no_output_____
14-.ipynb
###Markdown ${{ \left\Vert {X} \right\Vert }\mathop{{}}\nolimits_{{0}}}$ L0่Œƒๆ•ฐ:่กจ็คบๅ‘้‡xไธญ้ž้›ถๅ…ƒ็ด ็š„ไธชๆ•ฐ ${{ \left\Vert {X} \right\Vert }\mathop{{}}\nolimits_{{1}}}$ L1่Œƒๆ•ฐ๏ผˆๅˆ—ๆจก๏ผ‰:่กจ็คบๅ‘้‡xไธญ้ž้›ถๅ…ƒ็ด ็š„็ปๅฏนๅ€ผไน‹ๅ’Œ ${{ \left\Vert {X} \right\Vert }\mathop{{}}\nolimits_{{2}}}$ L2่Œƒๆ•ฐ๏ผˆ่ฐฑๆจก๏ผ‰:่กจ็คบๅ‘้‡ๅ…ƒ็ด ็š„ๅนณๆ–นๅ’Œๅ†ๅผ€ๅนณๆ–นๆฑ‚$A^{T}A$็š„็‰นๅพๅ€ผ๏ผŒๆ‰พๅ‡บๅ…ถไธญ็š„ๆœ€ๅคง็‰นๅพๅ€ผ๏ผŒๆฑ‚ๅ…ถๅนณๆ–นๆ น,็›ธๅฝ“ไบŽ$max(sqrt(eig(A^{T}A)))$๏ผŒไนŸๅซ่ฐฑ่Œƒๆ•ฐ ${{ \left\Vert {X} \right\Vert }\mathop{{}}\nolimits_{{F}}}$ F่Œƒๆ•ฐ:ๆ˜ฏๆŠŠไธ€ไธช็Ÿฉ้˜ตไธญๆฏไธชๅ…ƒ็ด ็š„ๅนณๆ–นๆฑ‚ๅ’ŒๅŽๅผ€ๆ นๅท(็”จไบŽ่กจ็คบ็Ÿฉ้˜ต้‡็บง) ${{ \left\Vert {X} \right\Vert }\mathop{{}}\nolimits_{{\infty}}}$ ๆ— ็ฉท่Œƒๆ•ฐ๏ผˆ่กŒๆจก๏ผ‰:ๅบฆ้‡ๅ‘้‡ๅ…ƒ็ด ็š„ๆœ€ๅคงๅ€ผ ${{ \left\Vert {X} \right\Vert }\mathop{{}}\nolimits_{{*}}}$ ๆ ธ่Œƒๆ•ฐNuclear Norm:็Ÿฉ้˜ตๅฅ‡ๅผ‚ๅ€ผ็š„ๅ’Œ(็”จไบŽ่กจ็คบไฝŽ็งฉ็Ÿฉ้˜ต) ๅญ็ฉบ้—ด่š็ฑป๏ผˆSubspace clustering๏ผ‰[Subspace clustering](https://towardsdatascience.com/subspace-clustering-7b884e8fff73) ๆœฌๆ•™็จ‹่งฃๅ†ณไปฅไธ‹้—ฎ้ข˜๏ผš 1. ้ซ˜็ปดๆ•ฐๆฎ็š„ๅค„็†ๅญ˜ๅœจไป€ไนˆๆŒ‘ๆˆ˜๏ผŸ+ ไป€ไนˆๆ˜ฏๅญ็ฉบ้—ด่š็ฑป๏ผŸ+ ๅฆ‚ไฝ•็”จpython่ฟ›่กŒๅญ็ฉบ้—ด่š็ฑป ้ซ˜็ปดๆ•ฐๆฎๆœ‰ไปŽๅ‡ ๅๅˆฐ็”š่‡ณๅ‡ ๅƒไธช็ปดๅบฆใ€‚ๆฏ”ๅฆ‚๏ผŒODๆ•ฐๆฎ ้ซ˜็ปดๆ•ฐๆฎ็š„ๅค„็†ๅญ˜ๅœจๅ›ฐ้šพ๏ผš1. 
ๅฏ่ง†ๅŒ–ๅ›ฐ้šพ๏ผŒๅพˆ้šพ็†่งฃๆ•ฐๆฎ้•ฟไป€ไนˆๆ ท๏ผŒๅ› ๆญคๅฎƒ้œ€่ฆ้™็ปดใ€‚ไนŸๅฏผ่‡ดไบ†็ปดๅบฆ็พ้šพ๏ผŒๅณๅพˆ้šพๅฏนๆฏไธชๅญ็ปดๅบฆ่ฟ›่กŒๆžšไธพ่ฟญไปฃ+ ๅ‰้ข้™็ปดๆŠ€ๆœฏ็š„้€‰ๆ‹ฉไผšๆžๅคง็š„ๅฝฑๅ“ๅŽ็ปญ็š„่š็ฑปๆ•ˆๆžœ+ ่ฎธๅคš็ปดๅบฆๅฏ่ƒฝๆ˜ฏไธ็›ธๅ…ณ็š„๏ผŒๅนถไธ”ๅฏไปฅๅœจๆœ‰ๅ™ชๅฃฐ็š„ๆ•ฐๆฎไธญๅฑ่”ฝ็Žฐๆœ‰็š„่š็ฑป+ ไธ€็งๅธธ่ง็š„ๆŠ€ๆœฏๆ˜ฏๆ‰ง่กŒ็‰นๅพ้€‰ๆ‹ฉ๏ผˆๅˆ ้™คไธ็›ธๅ…ณ็š„็ปดๅบฆ๏ผ‰๏ผŒไฝ†ๆ˜ฏๅœจๆŸไบ›ๆƒ…ๅ†ตไธ‹๏ผŒ่ฏ†ๅˆซๅ†—ไฝ™็ปดๅบฆๅนถไธๅฎนๆ˜“ ไป€ไนˆๆ˜ฏๅญ็ฉบ้—ด่š็ฑป๏ผŸ> ๅญ็ฉบ้—ด่š็ฑปๆ˜ฏไธ€็งๅœจไธๅŒๅญ็ฉบ้—ดไธญๅ‘็Žฐ่š็ฑป็š„ๆŠ€ๆœฏใ€‚ *ๅญ็ฉบ้—ดๅณๆ˜ฏๆ•ฐๆฎ้‡Œ้ขไธ€ไธชๆˆ–่€…ๅคšไธช็ปดๅบฆ็š„็ป„ๅˆ*ๅŸบๆœฌ็š„ๅ‡่ฎพๆ˜ฏ๏ผŒๆˆ‘ไปฌๅฏไปฅๆ‰พๅˆฐๅช็”ฑ็ปดๅบฆๅญ้›†ๅฎšไน‰็š„ๆœ‰ๆ•ˆ่š็ฑป๏ผˆไธ้œ€่ฆๅ…ทๆœ‰ๆ‰€ๆœ‰Nไธช็‰นๅพ็š„ไธ€่‡ดๆ€ง๏ผ‰ใ€‚ > **ไธพไพ‹:** ๅฆ‚ๆžœๆˆ‘ไปฌ่พ“ๅ…ฅ็—…ไบบ็š„ๅŸบๅ› ๆ•ฐๆฎ(ๆฏไธช็—…ไบบ็š„ๅŸบๅ› ๆœ‰20000ไธชๅฑžๆ€ง๏ผŒๆ•ฐๆฎ็ปดๅบฆๆœ‰20000ไธช)๏ผŒๆœ‰ไธ€็ฐ‡็—…ไบบๆ‚ฃไบ†ๅธ•้‡‘ๆฃฎ็—…๏ผŒ่ฟ™ไบ›็—…ไบบๅช้œ€่ฆ็œ‹100ไธชๅŸบๅ› ๅฐฑๅฏไปฅ็Ÿฅ้“๏ผŒ้‚ฃไนˆๆˆ‘ไปฌ็งฐ่ฟ™ไธชๅญ้›†ๅˆๅญ˜ๅœจไบŽ100็ปด้‡Œใ€‚ๆขๅฅ่ฏ่ฏด๏ผŒๅญ็ฉบ้—ด่š็ฑปๆ˜ฏไผ ็ปŸN็ปด่š็ฑปๅˆ†ๆž็š„ๆ‰ฉๅฑ•๏ผŒๅฎƒๅ…่ฎธ้€š่ฟ‡ๅˆ›ๅปบ**่กŒ**ๅ’Œ**ๅˆ—**ๅŒๆ—ถ่ฟ›่กŒ่š็ฑปใ€‚๏ผˆไผ ็ปŸ็š„ๅชๆ˜ฏๅฏน่กŒ่š็ฑป๏ผŒๅญ็ฉบ้—ด่š็ฑปๆ˜ฏๅŒๆ—ถๅฏน่กŒๅ’Œๅˆ—่š็ฑป๏ผ‰ ๅญ็ฉบ้—ด็š„่š็ฑป็ป“ๆžœๅฏ่ƒฝๅœจๅฑžๆ€ง๏ผˆ่กŒ๏ผ‰ๅ’Œ่ง‚ๆต‹ๅ€ผ๏ผˆๅˆ—๏ผ‰ไน‹้—ดๆœ‰้‡ๅ ๏ผŒๅฆ‚ไธŠๅ›พ๏ผŒๅ‡บ่‡ชๆญค[paper](https://www.kdd.org/exploration_files/parsons.pdf)ใ€‚ ๅฏไปฅ็œ‹ๅˆฐ๏ผŒ่ฟ™้‡Œ็ซŸ็„ถๅฏไปฅๆŠŠๆ•ฐๆฎ่šๆˆ4็ฑป๏ผŒไธคไธช็ฐ‡ไน‹้—ด็š„ๅ…ƒ็ด ๅฏไปฅ็ฆปๅพ—ๅพˆ่ฟ‘๏ผŒไฝ†ไนŸไธไผšๅนฒๆ‰ฐๅˆฐๅญ็ฉบ้—ด่š็ฑปใ€‚ไผ ็ปŸ็š„่š็ฑปๆ–นๆณ•ๅˆ™ๅพˆๅฎนๆ˜“่ขซๅนฒๆ‰ฐ ไธŠ้ข็š„ๆ•ฐๆฎ๏ผŒๅฆ‚ๆžœไฝ ไปŽไธ‰ไธช็ปดๅบฆ่ง‚็œ‹๏ผŒไผšๅ‘็Žฐๆฏไธช็ปดๅบฆไธญ๏ผŒ้ƒฝๆœ‰ไธๅŒ็ฐ‡็š„ๆ•ฐๆฎ็ณ…ๅˆๅœจไธ€่ตท ๅญ็ฉบ้—ด่š็ฑป็š„็ง็ฑป ๅŸบไบŽๆœ็ดข็ญ–็•ฅ๏ผŒๆˆ‘ไปฌๅฏไปฅๅŒบๅˆ†ไธค็ฑปๅญ็ฉบ้—ด่š็ฑป็ฎ—ๆณ•๏ผš 1. 
็”ฑไธ‹่‡ณไธŠ็ฎ—ๆณ•ไปŽๆ‰พๅˆฐไฝŽ็ปดๅบฆ็š„๏ผˆ1D๏ผ‰่š็ฑปๅผ€ๅง‹๏ผŒ้€ๆธ่žๅˆ๏ผŒไปฅ่ฟ›่กŒ้ซ˜็ปดๅบฆ็š„ๅค„็†ใ€‚+ ็”ฑไธŠ่‡ณไธ‹็ฎ—ๆณ•ไปŽๅ…จ้ƒจ็ปดๅบฆๅผ€ๅง‹ๆ‰พๅˆฐ่š็ฑป๏ผŒๅฆ‚ไฝ•ๅผ€ๅง‹่ฏ„ไผฐๆฏไธช่š็ฑป็š„ๅญ็ฉบ้—ดใ€‚ไธ‹ๅ›พๅฑ•็คบ็š„ๆ˜ฏๅธธ่ง็š„ๅญ็ฉบ้—ด่š็ฑป็ฎ—ๆณ• ๆดพ็ณป็ฎ—ๆณ•๏ผˆClique algorithm๏ผ‰ ็ฎ€ๅ•ๆฅ่ฏด๏ผŒ็ฎ—ๆณ•็ป่ฟ‡ไบ†ไปฅไธ‹ๆญฅ้ชค๏ผš> ๅฏนๆฏไธช็ปดๅบฆ(ๅฑžๆ€ง๏ผŒfeature)๏ผŒๆˆ‘ไปฌๅฐ†็ฉบ้—ดๅˆ†ๅ‰ฒไธบnBinsไธชๆ ผๅญ๏ผˆ็ฌฌไธ€ไธชๅ‚ๆ•ฐ๏ผ‰๏ผŒๅนถๅฏนๆฏไธชๆ ผๅญ่ฎก็ฎ—็›ดๆ–นๅ›พ๏ผˆๆ•ฐๆฎ้‡๏ผ‰ใ€‚ๆˆ‘ไปฌๅช่€ƒ่™‘ๅฏ†ๅบฆๅคง็š„ๅ•ๅ…ƒ(dense units)๏ผŒๅณๆ ผๅญ้‡Œๆ•ฐๆฎ้‡ๅคงไบŽๆŸไธช็ป™ๅฎš็š„้˜ˆๅ€ผnPoints๏ผˆ็ฌฌไบŒไธชๅ‚ๆ•ฐ๏ผ‰ใ€‚ๆฏไธชdense unitsๅˆๅธฆ็€ๅฑžๆ€ง๏ผš1. ๅฎƒๆ‰€ๅœจ็š„็ปดๅบฆ(ๅฑžๆ€ง๏ผŒfeature)2. ๆ ผๅญ็š„็ผ–ๅท3. ๆ ผๅญไธญ็š„ๆ•ฐๆฎ [ไปฃ็ ](https://github.com/ciortanmadalina/medium/blob/master/clique_clustering.ipynb) ###Code from sklearn.datasets.samples_generator import make_blobs from sklearn.preprocessing import StandardScaler n_components = 4 data, truth = make_blobs(n_samples=100, centers=n_components, random_state=42, n_features=2) data = preprocessing.MinMaxScaler().fit_transform(data) plt.scatter(data[:, 0], data[:, 1], s=50, c = truth) plt.title(f"Example of a mixture of {n_components} distributions") plt.xlabel("Feature 1") plt.ylabel("Feature 2"); ###Output _____no_output_____ ###Markdown ่ฏฅๆ•ฐๆฎ้›†ไธญ๏ผŒๆœ‰2ไธช็ปดๅบฆ๏ผŒๅญ˜ๅœจ4ไธช็ฐ‡ใ€‚็ฎ—ๆณ•้€‰ๆ‹ฉๅ‚ๆ•ฐnBins = 8,nPoints = 2ใ€‚ ็ฎ—ๆณ•้€‰ๆ‹ฉ็”ฑไธ‹่‡ณไธŠ็š„ๆ€่ทฏ๏ผŒไปŽ1Dๅผ€ๅง‹ใ€‚ๅฆ‚ๆžœ2ไธชไปฅไธŠ็š„dense unitsไธบ้‚ปๅฑ…๏ผŒๅˆ™ๅฐ†ๅฎƒไปฌๅˆๅนถไธบไธ€ไธชๆ›ดๅคง็š„binใ€‚ๅฆ‚ๆžœๅฐ†่ฟ™ไบ›ๆ ผๅญ่ฝฌๆขไธบ็ฝ‘็ปœๅ›พ๏ผŒ่ฟ™ไธชๆ“ไฝœๅฐฑๅฏไปฅๅพˆๅฎนๆ˜“ๅœฐ่ฟ›่กŒ๏ผŒๅˆ›ๅปบๅ›พ็š„ๆ—ถๅ€™๏ผŒๆฏไธชdense unitsไธบ่Š‚็‚น๏ผŒๅฆ‚ๆžœไธคไธช่Š‚็‚นๅฑžไบŽๅŒไธ€็ปดๅบฆ๏ผŒ่€Œไธ”ไป–ไปฌ็›ธ้‚ป๏ผˆไป–ไปฌไน‹้—ด็š„่ท็ฆปไธ่ถ…่ฟ‡1๏ผ‰๏ผŒๅˆ™ๅœจไธคไธช่Š‚็‚นไน‹้—ด็”Ÿๆˆ่พนใ€‚ ๆจช่ฝด็ปดๅบฆๅŒบๅˆ†ไธค็ฐ‡็บต่ฝด็ปดๅบฆๅŒบๅˆ†ไธ‰็ฐ‡ ๆŽฅไธ‹ๆฅ๏ผŒๆˆ‘ไปฌ่ฆไปŽ2Dๅผ€ๅง‹ๅˆฐๆ‰€ๆœ‰D๏ผŒ่ฎก็ฎ—ๆ‰€ๆœ‰ๅฏ่กŒ็š„็š„็ฐ‡ใ€‚่ฟ™ไธชๆ“ไฝœๅฏไปฅ่ฝฌๅŒ–ไธบ่ฎก็ฎ—kไธช็ปดๅบฆ็š„dense 
units็š„่žๅˆ๏ผŒๅนถๅชไฟ็•™ๆœ‰้‡ๅ ็š„ไธ”่ฟž็ปญ็š„dense bins็š„็ป“ๆžœใ€‚ๅœจ่ฎก็ฎ—ๅฎŒk-1็ปดๅบฆ็š„dense unitsๅŽ๏ผŒๆˆ‘ไปฌๅฏไปฅ้€š่ฟ‡่ฎก็ฎ—ๆ‰€ๆœ‰k-1็ปดๅบฆ็š„dense units็š„่žๅˆ่€Œๆ‰ฉๅฑ•ๅˆฐ็ฌฌk็ปดใ€‚ ๅœจไธŠ้ข็š„ๆ•ฐๆฎไธญ๏ผŒๆˆ‘ไปฌๅฏไปฅๅพ—ๅˆฐไธ‹ๅ›พ็š„่š็ฑป็ป“ๆžœใ€‚ ็ดซ่‰ฒ็‚นไธๅฑžไบŽไปปไฝ•็ฑป็ฐ‡๏ผŒๅ› ไธบไป–ไปฌๆ‰€ๅฑž็š„ๆ …ๆ ผไธญๆ•ฐๆฎไธชๆ•ฐๅฐไบŽ2ไธช๏ผˆnPoints๏ผ‰ ๆดพ็ณป็ฎ—ๆณ•ๅฏนๅฎƒๅ‚ๆ•ฐ็š„้€‰ๆ‹ฉ้žๅธธๆ•ๆ„Ÿ๏ผˆnBinsๅ’ŒnPoints๏ผ‰ใ€‚ไธ่ฟ‡๏ผŒๅฎƒๆ˜ฏ็”ฑไธ‹่‡ณไธŠ็ฎ—ๆณ•ๅฎถๆ—ไธญๆœ€ๅŸบ็ก€็š„็ฎ—ๆณ•ใ€‚ ๅŸบไบŽ่ฐฑ่š็ฑป็š„ๅญ็ฉบ้—ด่š็ฑป ๅฎž้™…ไธŠ๏ผŒ่ฐฑ่š็ฑปๅฎž็Žฐ็š„ๅฐฑๆ˜ฏๅญ็ฉบ้—ด่š็ฑป ๅ›žๆƒณไธ€ไธ‹๏ผŒๆˆ‘ไปฌๅœจ่ฐฑ่š็ฑปๆœ€ๅŽไธ€ๆญฅ็š„ๆ—ถๅ€™,$L$็Ÿฉ้˜ตๆœ‰$n$ไธช็ปดๅบฆ๏ผŒๆˆ‘ไปฌๅช้€š่ฟ‡ๆœ€ๅฐ็š„$k$ไธช็‰นๅพๅ€ผๆ‰€ๅฏนๅบ”็š„็‰นๅพๅ‘้‡ๅฏนๆ•ฐๆฎ่ฟ›่กŒkmeans่š็ฑป๏ผŒๅฐฑๅช่€ƒ่™‘ไบ†$k$ไธช็ปดๅบฆ๏ผŒ่ฟ™ไธๅฐฑๆ˜ฏๅญ็ฉบ้—ดๅ— ๅ‰้ขๆˆ‘ไปฌ่ฎฒๅˆฐ๏ผŒ่ฐฑ่š็ฑปไธญ้œ€่ฆๆž„ๅปบ็›ธไผผ็Ÿฉ้˜ต(affinity matrix)๏ผŒไปฅ$W$ๆˆ–่€…$A$่กจ็คบ Self-Expressiveness affinity๏ผˆๅŸบไบŽ่‡ช่กจ่พพๆ€ง็š„็›ธไผผๅบฆ็Ÿฉ้˜ต๏ผ‰ๆฆ‚ๅฟต๏ผšไปŽ็บฟๆ€งๅญ็ฉบ้—ดไธญๆๅ–็š„ไธ€ไธชๆ•ฐๆฎ็‚น$x_i$ๅฏไปฅ็”ฑๅŒไธ€ๅญ็ฉบ้—ดไธญๅ…ถไป–็‚น็š„็บฟๆ€ง็ป„ๅˆ่กจ็คบๅฐ†ๆ‰€ๆœ‰็‚นๅ ๅŠ ๅˆฐๆ•ฐๆฎ็Ÿฉ้˜ต$X$็š„ๅˆ—ไธญ๏ผŒๅ…ถ่‡ช่กจ่พพๆ€งๅฏไปฅ็ฎ€ๅ•ๅœฐๆ่ฟฐไธบ$$X = XC$$ๅ…ถไธญ$C$ไธบSelf-Expressiveness็ณปๆ•ฐ็Ÿฉ้˜ต๏ผŒ$X$็Ÿฉ้˜ตไธบ$(n*m)$๏ผŒ$C$็Ÿฉ้˜ตไธบ$(m*m)$๏ผŒ$XC$็Ÿฉ้˜ตไธบ$(n*m)$๏ผŒ$n$ไธบๆ•ฐๆฎ่ฎฐๅฝ•ๆ•ฐ๏ผŒ$m$ไธบๆฏๆกๆ•ฐๆฎ็š„็ปดๅบฆใ€‚ๅœจ่ฟ™้‡Œ๏ผŒไปฅOD็Ÿฉ้˜ตไธบไพ‹๏ผŒ$n$ไธบODๅฏนๆ•ฐ๏ผŒ$m$ไธบๆ—ถ้—ด๏ผŒๅณ |X็Ÿฉ้˜ต|1ๆ—ฅ|2ๆ—ฅ|...|mๆ—ฅ||----||||||AๅˆฐB|20|30|...|40||AๅˆฐC|20|30|...|40||...|...|...|...|...||n|20|30|...|40||C็Ÿฉ้˜ต|1ๆ—ฅ|2ๆ—ฅ|...|mๆ—ฅ||----||||||1ๆ—ฅ|0|็›ธไผผๅบฆ|...|็›ธไผผๅบฆ||2ๆ—ฅ|็›ธไผผๅบฆ|0|...|็›ธไผผๅบฆ||...|...|...|...|...||mๆ—ฅ|็›ธไผผๅบฆ|็›ธไผผๅบฆ|...|0|ๅ‡่ฎพๅญ็ฉบ้—ด้ƒฝๆ˜ฏ็‹ฌ็ซ‹็š„๏ผŒๆœ€ๅฐๅŒ–$C$็Ÿฉ้˜ต็š„่Œƒๆ•ฐ๏ผŒๅฐฑๅฏไปฅไฟ่ฏ$C$ๅœจไปฅๆŸไบ›้กบๅบๆŽ’ๅˆ—็š„ๆ—ถๅ€™ๅ‡บ็Žฐๅ—็Šถๅฏน่ง’็บฟ็ป“ๆž„๏ผŒไนŸๅ› ๆญคๅฏไปฅไปฅ$C$็Ÿฉ้˜ตๆฅๆž„ๅปบ็›ธไผผๅบฆ็Ÿฉ้˜ต็”จไบŽ่ฐฑ่š็ฑป[P. Ji, M. Salzmann, and H. Li. Efficient dense subspace clustering. 
In WACV, pages 461โ€“468. IEEE, 2014.] ๅœจๆ•ฐๅญฆไธŠไธบ๏ผš $$min{{ \left\Vert {C} \right\Vert }_{p}}$$ $$s.t. diag(C)=0$$ $$X = XC$$ $$C\geqslant0$$ ๅ…ถไธญ๏ผŒ${ \left\Vert {C} \right\Vert }_{p}$ไธบ็Ÿฉ้˜ต$C$็š„ไปปๆ„่Œƒๆ•ฐๆ‰พๅˆฐ่ฟ™ๆ ทๅญ็š„$C$,ๅฐฑๅฏไปฅๆž„ๅปบๅ‡บ่ฐฑ่š็ฑปๆ‰€้œ€็š„็›ธไผผๅบฆ็Ÿฉ้˜ต$A$:$$A=C^T+C$$ *ๅธธ่ง็š„็Ÿฉ้˜ต่Œƒๆ•ฐๆ€ป็ป“ ${{ \left\Vert {X} \right\Vert }\mathop{{}}\nolimits_{{0}}}$ L0่Œƒๆ•ฐ:่กจ็คบๅ‘้‡xไธญ้ž้›ถๅ…ƒ็ด ็š„ไธชๆ•ฐ ${{ \left\Vert {X} \right\Vert }\mathop{{}}\nolimits_{{1}}}$ L1่Œƒๆ•ฐ๏ผˆๅˆ—ๆจก๏ผ‰:่กจ็คบๅ‘้‡xไธญ้ž้›ถๅ…ƒ็ด ็š„็ปๅฏนๅ€ผไน‹ๅ’Œ ${{ \left\Vert {X} \right\Vert }\mathop{{}}\nolimits_{{2}}}$ L2่Œƒๆ•ฐ๏ผˆ่ฐฑๆจก๏ผ‰:่กจ็คบๅ‘้‡ๅ…ƒ็ด ็š„ๅนณๆ–นๅ’Œๅ†ๅผ€ๅนณๆ–นๆฑ‚$A^{T}A$็š„็‰นๅพๅ€ผ๏ผŒๆ‰พๅ‡บๅ…ถไธญ็š„ๆœ€ๅคง็‰นๅพๅ€ผ๏ผŒๆฑ‚ๅ…ถๅนณๆ–นๆ น,็›ธๅฝ“ไบŽ$max(sqrt(eig(A^{T}A)))$๏ผŒไนŸๅซ่ฐฑ่Œƒๆ•ฐ ${{ \left\Vert {X} \right\Vert }\mathop{{}}\nolimits_{{F}}}$ F่Œƒๆ•ฐ:ๆ˜ฏๆŠŠไธ€ไธช็Ÿฉ้˜ตไธญๆฏไธชๅ…ƒ็ด ็š„ๅนณๆ–นๆฑ‚ๅ’ŒๅŽๅผ€ๆ นๅท(็”จไบŽ่กจ็คบ็Ÿฉ้˜ต้‡็บง) ${{ \left\Vert {X} \right\Vert }\mathop{{}}\nolimits_{{\infty}}}$ ๆ— ็ฉท่Œƒๆ•ฐ๏ผˆ่กŒๆจก๏ผ‰:ๅบฆ้‡ๅ‘้‡ๅ…ƒ็ด ็š„ๆœ€ๅคงๅ€ผ ${{ \left\Vert {X} \right\Vert }\mathop{{}}\nolimits_{{*}}}$ ๆ ธ่Œƒๆ•ฐNuclear Norm:็Ÿฉ้˜ตๅฅ‡ๅผ‚ๅ€ผ็š„ๅ’Œ(็”จไบŽ่กจ็คบไฝŽ็งฉ็Ÿฉ้˜ต) ่€Œๅœจๅฎž้™…็š„ๆ•ฐๆฎ้›†ไธญ๏ผŒๅญ˜ๅœจๆ•ฐๆฎๅ™ชๅฃฐ๏ผŒๅ› ๆญคๅฐ†้—ฎ้ข˜่ฐƒๆ•ดไธบ๏ผš $$min{{ \left\Vert {C} \right\Vert }\mathop{{}}\nolimits_{{F}}}+\frac{\lambda}{2}{{ \left\Vert {X-XC} \right\Vert }\mathop{{}}\nolimits_{{F}}}^2$$ $$s.t. diag(C)=0$$ $$C\geqslant0$$ ๆ‰พๅˆฐ่ฟ™ๆ ทๅญ็š„$C$,ๅฐฑๅฏไปฅ็”จ่ฐฑ่š็ฑป ไธŠ้ข็š„็›ฎๆ ‡ๅ‡ฝๆ•ฐ๏ผŒๅ…ถๅฎž็›ธๅฝ“ไบŽๅŒๆ—ถ่พพๆˆไธคไธช็›ฎๆ ‡๏ผŒๅณ๏ผš1. ${ \left\Vert {C} \right\Vert }\mathop{{}}\nolimits_{{F}}$ๆœ€ๅฐ๏ผŒ่ฟ™ไธชๆ˜ฏไธบไบ†ๆปก่ถณSelf-Expressiveness็ณปๆ•ฐ็Ÿฉ้˜ตๅฎšไน‰2. 
${ \left\Vert {X-XC} \right\Vert }\mathop{{}}\nolimits_{{F}}$ๆœ€ๅฐ๏ผŒๅฎž้™…ไธŠ$XC$ๆ˜ฏๆˆ‘ไปฌๆž„ๅปบๅ‡บๆฅ็š„็Ÿฉ้˜ต๏ผŒ$X-XC$ๆ˜ฏๅ™ชๅฃฐ็Ÿฉ้˜ต๏ผŒๅณๅ™ชๅฃฐๆœ€ๅฐ ้‚ฃไนˆ๏ผŒ่ฟ™ไธช็›ฎๆ ‡ๅ‡ฝๆ•ฐ็›ธๅฝ“ไบŽๆˆ‘ไปฌ่ƒฝๅคŸๅŒๆ—ถๅฎŒๆˆๅŽปๅ™ชๅฃฐๅ’Œ้™็ปด็š„ๅทฅไฝœ,ไพ‹ๅฆ‚๏ผš ้‚ฃไนˆ๏ผŒๅฆ‚ไฝ•ๆฑ‚่งฃ๏ผŒ$C$็Ÿฉ้˜ตๆ€Žไนˆ่Žทๅพ—? Deep Adversarial Subspace Clustering๏ผˆ2018ๅนดๆœบๅ™จๅญฆไน ้ข†ๅŸŸ่ฎบๆ–‡๏ผ‰ [ใ€่ฎบๆ–‡้˜…่ฏปใ€‘Deep Adversarial Subspace Clustering](https://www.cnblogs.com/EstherLjy/p/9840016.html) [Deep-subspace-clustering-networks github](https://github.com/panji1990/Deep-subspace-clustering-networks) ็ป™ๅ‡บ็š„่งฃๅ†ณๆ–นๆกˆๆ˜ฏๆญๅปบDeep Convolutional Auto-Encoder ๆŸๅคฑๅ‡ฝๆ•ฐไธบ ๅ…ถไธญ ###Code import torch import torch.nn as nn import torch.utils.data as Data import torchvision import matplotlib.pyplot as plt EPOCH = 100 BATCH_SIZE = 500 LR = 0.01 DOWNLOAD_MNIST = False train_data = torchvision.datasets.MNIST( root = './mnist', train = True, transform = torchvision.transforms.ToTensor(), #ไปŽ0-255ๅŽ‹็ผฉๅˆฐ0-1 download =DOWNLOAD_MNIST ) # ๅ…ˆ่ฝฌๆขๆˆ torch ่ƒฝ่ฏ†ๅˆซ็š„ Dataset torch_dataset = Data.TensorDataset(train_data.train_data[:BATCH_SIZE], train_data.train_labels[:BATCH_SIZE]) # ๆŠŠ dataset ๆ”พๅ…ฅ DataLoader loader = Data.DataLoader( dataset=torch_dataset, # torch TensorDataset format batch_size=BATCH_SIZE, # mini batch size shuffle=False, # ่ฆไธ่ฆๆ‰“ไนฑๆ•ฐๆฎ (ๆ‰“ไนฑๆฏ”่พƒๅฅฝ) num_workers=2, # ๅคš็บฟ็จ‹ๆฅ่ฏปๆ•ฐๆฎ ) i = 0 def printdata(x,y): plt.imshow(x,cmap = 'gray') plt.title(y) plt.show() printdata(train_data.train_data[i],str(train_data.train_labels[i].numpy())) import numpy as np class DSC(nn.Module): def __init__(self): super().__init__() self.encoder = nn.Sequential( nn.Linear(28*28,128), nn.Tanh(), nn.Linear(128,64), nn.Tanh(), nn.Linear(64,12), nn.Tanh(), nn.Linear(12,5), ) self.selfexpr = nn.Linear(BATCH_SIZE,BATCH_SIZE,bias=False) self.decoder = nn.Sequential( nn.Linear(5,12), nn.Tanh(), nn.Linear(12,64), nn.Tanh(), nn.Linear(64,128), nn.Tanh(), 
nn.Linear(128,28*28), nn.Sigmoid() ) def forward(self,x): z = self.encoder(x) z = torch.transpose(z, 1, 0) z_ = self.selfexpr(z) z_ = torch.transpose(z_, 1, 0) x_ = self.decoder(z_) return z,z_,x_ dsc = DSC() dsc.cuda() optimizer = torch.optim.Adam(dsc.parameters(),lr = LR,betas = (0.9,0.99)) loss_func = torch.nn.MSELoss() loss_list = [] lambda1 = 1 lambda2 = 10**(epoch/10-3) steps = [] for epoch in range(1000): # ่ฎญ็ปƒๆ‰€ๆœ‰!ๆ•ดๅฅ—!ๆ•ฐๆฎ EPOCH ๆฌก for step, (batch_x, batch_y) in enumerate(loader): # ๆฏไธ€ๆญฅ loader ้‡Šๆ”พไธ€ๅฐๆ‰นๆ•ฐๆฎ็”จๆฅๅญฆไน  steps.append(step) batch_x = (batch_x.view(-1,28*28).type(torch.FloatTensor)/255).cuda() batch_y = batch_y.cuda() z,z_,x_ = dsc(batch_x) C = dsc.selfexpr.state_dict()['weight'] loss = 1/2*torch.norm(x_-batch_x)**2+lambda1*torch.norm(C)+lambda2/2*torch.norm(torch.transpose(z, 1, 0)-z_)**2 loss_list.append(loss) optimizer.zero_grad() #ๅˆๅง‹ๅŒ–ๆขฏๅบฆ loss.backward() #่ฎก็ฎ—ๆขฏๅบฆ optimizer.step() #ๅฏนๅ‚ๆ•ฐๅญฆไน  import IPython IPython.display.clear_output(wait=True) plt.plot(range(len(steps[-50:])),loss_list[-50:]) plt.ylabel('loss') plt.xlabel('train step') plt.show() C = dsc.selfexpr.state_dict()['weight'].cpu().numpy() plt.imshow(C,cmap = 'Greys') plt.show() for epoch in range(1000): # ่ฎญ็ปƒๆ‰€ๆœ‰!ๆ•ดๅฅ—!ๆ•ฐๆฎ EPOCH ๆฌก for step, (batch_x, batch_y) in enumerate(loader): # ๆฏไธ€ๆญฅ loader ้‡Šๆ”พไธ€ๅฐๆ‰นๆ•ฐๆฎ็”จๆฅๅญฆไน  steps.append(step) batch_x = (batch_x.view(-1,28*28).type(torch.FloatTensor)/255).cuda() batch_y = batch_y.cuda() z,z_,x_ = dsc(batch_x) C = dsc.selfexpr.state_dict()['weight'] loss = 1/2*torch.norm(x_-batch_x)**2+lambda1*torch.norm(C)+lambda2/2*torch.norm(torch.transpose(z, 1, 0)-z_)**2 loss_list.append(loss) optimizer.zero_grad() #ๅˆๅง‹ๅŒ–ๆขฏๅบฆ loss.backward() #่ฎก็ฎ—ๆขฏๅบฆ optimizer.step() #ๅฏนๅ‚ๆ•ฐๅญฆไน  import IPython IPython.display.clear_output(wait=True) plt.plot(range(len(steps[-50:])),loss_list[-50:]) plt.ylabel('loss') plt.xlabel('train step') plt.show() i = 10 def printdata(x,y): 
plt.imshow(x,cmap = 'gray') plt.title(y) plt.show() printdata(x_[i].view(28,28).cpu().data.numpy(),'test') def printdata(x,y): plt.imshow(x,cmap = 'gray') plt.title(y) plt.show() printdata(batch_x[i].view(28,28).cpu().data.numpy(),'test') A = C+C.T plt.imshow(A,cmap = 'Greys') plt.show() D = np.diag(A.sum(axis=1)) L = D - A #่ฎก็ฎ—็‰นๅพๅ€ผๅ’Œ็‰นๅพๅ‘้‡ vals, vecs = np.linalg.eig(L) #้‡ๆ–ฐๆŽ’ๅบ vecs = vecs[:,np.argsort(vals)] vals = vals[np.argsort(vals)] n = 10 #็”จkmeansๅฏน็ฌฌ2ๅˆฐ็ฌฌ4ไธช็‰นๅพๅ‘้‡่šๆˆ4็ฑป from sklearn.cluster import KMeans kmeans = KMeans(n_clusters=n) kmeans.fit(vecs[:,:n]) spectral_labels = kmeans.labels_ spectral_labels batch_y ###Output _____no_output_____ ###Markdown ๅฎŸ้จ“ใ‚’ๅง‹ใพใ‚Šใพใ™๏ผ pre training strategies ###Code import torch import torch.nn as nn import torch.utils.data as Data import torchvision import matplotlib.pyplot as plt EPOCH = 2 BATCH_SIZE = 50 LR = 0.01 DOWNLOAD_MNIST = False train_data = torchvision.datasets.MNIST( root = './mnist', train = True, transform = torchvision.transforms.ToTensor(), #ไปŽ0-255ๅŽ‹็ผฉๅˆฐ0-1 download =DOWNLOAD_MNIST ) # ๅ…ˆ่ฝฌๆขๆˆ torch ่ƒฝ่ฏ†ๅˆซ็š„ Dataset torch_dataset = Data.TensorDataset(train_data.train_data, train_data.train_labels) # ๆŠŠ dataset ๆ”พๅ…ฅ DataLoader loader = Data.DataLoader( dataset=torch_dataset, # torch TensorDataset format batch_size=BATCH_SIZE, # mini batch size shuffle=True, # ่ฆไธ่ฆๆ‰“ไนฑๆ•ฐๆฎ (ๆ‰“ไนฑๆฏ”่พƒๅฅฝ) num_workers=2, # ๅคš็บฟ็จ‹ๆฅ่ฏปๆ•ฐๆฎ ) class AutoEncoder(nn.Module): def __init__(self): super().__init__() self.encoder = nn.Sequential( nn.Linear(28*28,128), nn.Tanh(), nn.Linear(128,64), nn.Tanh(), nn.Linear(64,12), nn.Tanh(), nn.Linear(12,10), ) self.decoder = nn.Sequential( nn.Linear(10,12), nn.Tanh(), nn.Linear(12,64), nn.Tanh(), nn.Linear(64,128), nn.Tanh(), nn.Linear(128,28*28), nn.Sigmoid() ) def forward(self,x): encoded = self.encoder(x) decoded = self.decoder(encoded) return encoded,decoded autoencoder = AutoEncoder() 
autoencoder.cuda() optimizer = torch.optim.Adam(autoencoder.parameters(),lr = LR,betas = (0.9,0.99)) loss_func = torch.nn.MSELoss() loss_list = [] for epoch in range(EPOCH): # ่ฎญ็ปƒๆ‰€ๆœ‰!ๆ•ดๅฅ—!ๆ•ฐๆฎ EPOCH ๆฌก for step, (batch_x, batch_y) in enumerate(loader): # ๆฏไธ€ๆญฅ loader ้‡Šๆ”พไธ€ๅฐๆ‰นๆ•ฐๆฎ็”จๆฅๅญฆไน  batch_x = (batch_x.view(-1,28*28).type(torch.FloatTensor)/255).cuda() batch_y = batch_y.cuda() encoded,decoded = autoencoder(batch_x) loss = loss_func(decoded,batch_x) loss_list.append(loss) optimizer.zero_grad() #ๅˆๅง‹ๅŒ–ๆขฏๅบฆ loss.backward() #่ฎก็ฎ—ๆขฏๅบฆ optimizer.step() #ๅฏนๅ‚ๆ•ฐๅญฆไน  import IPython IPython.display.clear_output(wait=True) plt.ylabel('loss') plt.xlabel('train step') plt.plot(range(len(loss_list)),loss_list,label='Adam') plt.legend() plt.show() def printdata(x,y): plt.imshow(x,cmap = 'gray') plt.title(y) plt.show() for i in range(3): printdata(decoded[i].view(28,28).cpu().data.numpy(),str(batch_y[i].cpu().numpy())) printdata(batch_x[i].view(28,28).cpu().numpy(),str(batch_y[i].cpu().numpy())) ###Output _____no_output_____ ###Markdown fine-tuning strategies ๆๅ–ๅ‚ๆ•ฐ ###Code import numpy as np latent_features = 10 class DSC(nn.Module): def __init__(self,autoencoder,BATCH_SIZE): super().__init__() self.encoder = nn.Sequential( nn.Linear(28*28,128), nn.Tanh(), nn.Linear(128,64), nn.Tanh(), nn.Linear(64,12), nn.Tanh(), nn.Linear(12,10), ) self.encoder.load_state_dict(autoencoder.encoder.state_dict()) kill_matrix = np.ones((BATCH_SIZE,BATCH_SIZE)) for i in range(BATCH_SIZE): kill_matrix[i][i]=0 self.k = torch.tensor(kill_matrix,dtype=torch.float, requires_grad=False) self.m = torch.zeros([BATCH_SIZE,BATCH_SIZE],dtype=torch.float ,requires_grad=True) self.decoder = nn.Sequential( nn.Linear(10,12), nn.Tanh(), nn.Linear(12,64), nn.Tanh(), nn.Linear(64,128), nn.Tanh(), nn.Linear(128,28*28), nn.Sigmoid() ) self.decoder.load_state_dict(autoencoder.decoder.state_dict()) ### //ไธ่ฎญ็ปƒๆŸไบ›ๅฑ‚ frozen_layers = [self.encoder, self.decoder,] 
for layer in frozen_layers: for name, value in layer.named_parameters(): value.requires_grad = False def forward(self,x): z = self.encoder(x) z = torch.transpose(z, 1, 0) c = self.k.mul(self.m) z_ = z.mm(c) x_ = torch.transpose(z_, 1, 0) x_ = self.decoder(x_) return z,z_,x_,c ###Output _____no_output_____ ###Markdown ๅผ€ๅง‹็‚ผไธน ###Code DOWNLOAD_MNIST = False train_data = torchvision.datasets.MNIST( root = './mnist', train = True, transform = torchvision.transforms.ToTensor(), #ไปŽ0-255ๅŽ‹็ผฉๅˆฐ0-1 download =DOWNLOAD_MNIST ) # ๅ…ˆ่ฝฌๆขๆˆ torch ่ƒฝ่ฏ†ๅˆซ็š„ Dataset torch_dataset = Data.TensorDataset(train_data.train_data[:BATCH_SIZE], train_data.train_labels[:BATCH_SIZE]) # ๆŠŠ dataset ๆ”พๅ…ฅ DataLoader loader = Data.DataLoader( dataset=torch_dataset, # torch TensorDataset format batch_size=BATCH_SIZE, # mini batch size shuffle=False, # ่ฆไธ่ฆๆ‰“ไนฑๆ•ฐๆฎ (ๆ‰“ไนฑๆฏ”่พƒๅฅฝ) num_workers=2, # ๅคš็บฟ็จ‹ๆฅ่ฏปๆ•ฐๆฎ ) import torch import torch.nn as nn import torch.utils.data as Data import torchvision import matplotlib.pyplot as plt K = 10 EPOCH = 50+25*K BATCH_SIZE = 64 LR = 0.01 DOWNLOAD_MNIST = False train_data = torchvision.datasets.MNIST( root = './mnist', train = True, transform = torchvision.transforms.ToTensor(), #ไปŽ0-255ๅŽ‹็ผฉๅˆฐ0-1 download =DOWNLOAD_MNIST ) # ๅ…ˆ่ฝฌๆขๆˆ torch ่ƒฝ่ฏ†ๅˆซ็š„ Dataset torch_dataset = Data.TensorDataset(train_data.train_data[:BATCH_SIZE], train_data.train_labels[:BATCH_SIZE]) # ๆŠŠ dataset ๆ”พๅ…ฅ DataLoader loader = Data.DataLoader( dataset=torch_dataset, # torch TensorDataset format batch_size=BATCH_SIZE, # mini batch size shuffle=False, # ่ฆไธ่ฆๆ‰“ไนฑๆ•ฐๆฎ (ๆ‰“ไนฑๆฏ”่พƒๅฅฝ) num_workers=2, # ๅคš็บฟ็จ‹ๆฅ่ฏปๆ•ฐๆฎ ) dsc = DSC(autoencoder,BATCH_SIZE) dsc optimizer = torch.optim.Adam([dsc.m],lr = LR,betas = (0.9,0.99),weight_decay=1e-5) loss_func = torch.nn.MSELoss() loss_list = [] lambda1 = 1 lambda2 = 10**(K/10-3) steps = [] t = 0 for epoch in range(EPOCH): # ่ฎญ็ปƒๆ‰€ๆœ‰!ๆ•ดๅฅ—!ๆ•ฐๆฎ EPOCH ๆฌก for step, (batch_x, 
batch_y) in enumerate(loader): # ๆฏไธ€ๆญฅ loader ้‡Šๆ”พไธ€ๅฐๆ‰นๆ•ฐๆฎ็”จๆฅๅญฆไน  t+=1 steps.append(t) batch_x = (batch_x.view(-1,28*28).type(torch.FloatTensor)/255) batch_y = batch_y batch_y,index_ = batch_y.sort() batch_x = batch_x[index_] z,z_,x_,c = dsc(batch_x) loss = 1/2*torch.norm(x_-batch_x)**2+lambda1*torch.norm(c)+\ lambda2/2*torch.norm(z-z_)**2 loss_list.append(loss) optimizer.zero_grad() #ๅˆๅง‹ๅŒ–ๆขฏๅบฆ loss.backward() #่ฎก็ฎ—ๆขฏๅบฆ optimizer.step() #ๅฏนๅ‚ๆ•ฐๅญฆไน  import IPython IPython.display.clear_output(wait=True) plt.plot(steps[-30:],loss_list[-30:]) plt.ylabel('loss') plt.xlabel('train step') plt.show() C = c.data.numpy() plt.imshow(C,cmap = 'Greys') plt.show() def printdata(x,y): plt.imshow(x,cmap = 'gray') plt.title(y) plt.show() for i in range(3): printdata(x_[i].view(28,28).cpu().data.numpy(),str(batch_y[i].cpu().numpy())) printdata(batch_x[i].view(28,28).cpu().numpy(),str(batch_y[i].cpu().numpy())) np.matmul(z.numpy(),C),z.data.numpy() C C = c.data.numpy() plt.imshow(abs(C),cmap = 'Greys') plt.show() A = abs(C)+abs(C.T) plt.imshow(A,cmap = 'Greys') plt.show() D = np.diag(A.sum(axis=1)) L = D - A #่ฎก็ฎ—็‰นๅพๅ€ผๅ’Œ็‰นๅพๅ‘้‡ vals, vecs = np.linalg.eig(L) #้‡ๆ–ฐๆŽ’ๅบ vecs = vecs[:,np.argsort(vals)] vals = vals[np.argsort(vals)] n = 2 #็”จkmeansๅฏน็ฌฌ2ๅˆฐ็ฌฌ4ไธช็‰นๅพๅ‘้‡่šๆˆ4็ฑป from sklearn.cluster import KMeans kmeans = KMeans(n_clusters=n) kmeans.fit(vecs[:,:n]) spectral_labels = kmeans.labels_ spectral_labels plt.plot(range(len(vals)),vals) import pandas as pd a = pd.DataFrame(vals) eigngap = a-a.shift().fillna(0) plt.plot(range(len(eigngap)),eigngap) plt.xlim(0,50) plt.show() z.size() ###Output _____no_output_____
Files/2017-10-24-Sensors-for-Smart-Cities.ipynb
###Markdown **Smart Cities: the use of sensors to tackle the Urban Heat Island effect** *A Data Science for Smart Cities project carried out with Jupyter and Python* Data source: [Road Weather Information Stations, City of Seattle Open Data Portal.](https://data.seattle.gov/Transportation/Road-Weather-Information-Stations/egc4-d24i) ###Code #Import libraries import dask.dataframe as dd #High-performance data frame handler import pandas as pd import numpy as np #Locate file and load it as data frame with proper data types filename = 'Road_Weather_Information_Stations.csv' df = dd.read_csv(filename, dtype={'StationName': str, 'StationLocation': object, 'RecordId':int, 'RoadSurfaceTemperature': float, 'AirTemperature': float}, parse_dates=['DateTime']) #Visualize first 5 rows df.head(5) #Visualize last 5 of 708,891 rows df.tail(5) #Visualize data types df._meta.dtypes #object=string ###Output _____no_output_____ ###Markdown Notice Python offers powerful tools to interact with very large files, such as the dask package, but one significant costraint is the machine on which it operates.Machines with limited resources require long computational time, **slowing down the working pipeline**. For this reason, the City of Seattle Open Data Portal allows to quickly query and slice the dataset online. This approach was followed in order to **preserve the computational resources**, an option that should always be considered whenever possible.The hottest month on average is August, so the online query covers the 01 August 2016 - 31 August 2016 period. 
###Code import pandas as pd import datetime as DT #Load the csv file for August 2016 filename='August2016_Road_Weather_Information_Stations.csv' august=pd.read_csv(filename, dtype={'StationName': str, 'StationLocation': object, 'RecordId':int, 'RoadSurfaceTemperature': float, 'AirTemperature': float}) #Visualize first 5 values august.head(5) #Visualize data types august.dtypes #object=string #Separate date from time august['Date'] = [d.date() for d in august['DateTime']] august['Time'] = [d.time() for d in august['DateTime']] #Visualize august.head(5) #Drop DateTime Air and Temperature column august=august.drop('DateTime', axis=1) august=august.drop('AirTemperature', axis=1) #Sort the dataframe august.sort_values(by=['StationName'], inplace=True) #Set the index to be this and don't drop august.set_index(keys=['StationName'], drop=False,inplace=True) #Get a list of stations stations=august['StationName'].unique().tolist() #Create split dataframes alaskan = august.loc[august.StationName=='AlaskanWayViaduct_KingSt'] albro = august.loc[august.StationName=='AlbroPlaceAirportWay'] aurora = august.loc[august.StationName=='AuroraBridge'] harbor = august.loc[august.StationName=='HarborAveUpperNorthBridge'] joseriza = august.loc[august.StationName=='JoseRizalBridgeNorth'] magnolia = august.loc[august.StationName=='MagnoliaBridge'] myrtle = august.loc[august.StationName=='35thAveSW_SWMyrtleSt'] ne45 = august.loc[august.StationName=='NE45StViaduct'] roosevelt = august.loc[august.StationName=='RooseveltWay_NE80thSt'] spokane = august.loc[august.StationName=='SpokaneSwingBridge'] #Create list with data frame names for reference station_list=[alaskan,albro,aurora,harbor,joseriza,magnolia,myrtle,ne45,roosevelt,spokane] #Compute average temperature for each time interval and station (e.g., time series) for August 2016 #and plot the series import matplotlib.pyplot as plt #Create empty image fig = plt.figure(figsize=(15,18)) plt.subplots_adjust(hspace=0.3) #Empty lists to store peak 
times, station names, and max road surface temperatures peak=[] names=[] #Loop through the station list for i, station in enumerate(station_list): mean_t=station.groupby('Time').RoadSurfaceTemperature.mean() #Convert Fahrenheit degrees into Celsius degrees mean_t=mean_t.apply(lambda x: ((x - 32) * 5/9)) #Save station names, peak times, and temperatures peak.append(mean_t.idxmax().strftime("%H:%M:%S")) names.append(str(station.StationName[1])) #Add plot ax = fig.add_subplot(5, 2, i+1) plt.scatter(mean_t.index, mean_t) plt.title(str(station.StationName[1])) plt.xlabel("Time") plt.ylabel("degrees Celsius") plt.ylim(15,40) plt.show() #Show peak times peak_df=pd.DataFrame({'Station Name' : names, 'Peak Time': peak}) peak_df=peak_df[['Station Name','Peak Time']] peak_df #Extract average road surface temperatures for the 3:50PM-4:10PM interval #Empty dictionary to store station names and average peak road surface temperatures peak_temp={} #Loop through the station list for i, station in enumerate(station_list): mean_t=station.groupby('Time').RoadSurfaceTemperature.mean() mean_t=mean_t.apply(lambda x: ((x - 32) * 5/9)) #Save the average temperature between 3:50PM and 4:10PM peak_temp[str(station.StationName[1])]=round(mean_t[950:971].mean(),3) #Visualize data frame max_t=pd.DataFrame({'Station Name':peak_temp.keys(), 'Max Temperature': peak_temp.values()}) max_t=max_t[['Station Name', 'Max Temperature']] max_t ###Output _____no_output_____ ###Markdown Interactive plotting ###Code #Sensor locations sensors=pd.read_csv('sensors.csv') #Add temperature column complete=pd.merge(sensors, max_t, on='Station Name', how='outer') complete import numpy as np import pandas as pd import folium import branca import matplotlib.pyplot as plt #Set coordinates SEATTLE_COORDINATES = (47.59844, -122.33561) #Empty map zoomed in on Seattle map = folium.Map(location=SEATTLE_COORDINATES, zoom_start=11.48, tiles='Stamen Terrain') #Create group of circle markers f = folium.map.FeatureGroup() 
lats=complete['Lat'].tolist() lngs=complete['Lon'].tolist() sizes=complete['Max Temperature'].tolist() popup=complete['Station Name'].tolist() #Colormap colors=np.asarray(sizes) cm = branca.colormap.LinearColormap(['green', 'yellow', 'red'], vmin=25, vmax=41) cm.caption = 'Road Surface Temperature [degrees Celsius]' map.add_child(cm) #Add color-coded circles for lat, lng, size, color in zip(lats, lngs, sizes, colors): f.add_child(folium.features.CircleMarker( [lat, lng], radius=size, color=None, #popup=popup, fill_color=cm(color))) map.add_child(f) #Add markers with popups for each in complete[0:10].iterrows(): folium.Marker([each[1]['Lat'],each[1]['Lon']], popup='Bridge: '+each[1]['Station Name']+'; Max Temp: '+str(each[1]['Max Temperature'])).add_to(map) #Display map ###Output _____no_output_____
test/py_function_file_test.ipynb
###Markdown Variable Length Arguments ###Code def vfunc(arg1, *args): print('arg1: ', arg1) if args is not (): print('>>> args:', args, ', type: tuple, ', end='') for index, item in enumerate(args): print('args[{}]:'.format(index), item, end=' ') return vfunc(666) vfunc(33, 44, 55, 66, 77, 88) ###Output arg1: 666 arg1: 33 >>> args: (44, 55, 66, 77, 88) , type: tuple, args[0]: 44 args[1]: 55 args[2]: 66 args[3]: 77 args[4]: 88 ###Markdown Anonymous / Lambda ###Code from math import pi perimeter = lambda r: r * pi * 2 perimeter(10) ###Output _____no_output_____ ###Markdown Module dir() ###Code import math print(dir(math)) ###Output ['__doc__', '__loader__', '__name__', '__package__', '__spec__', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'ceil', 'copysign', 'cos', 'cosh', 'degrees', 'e', 'erf', 'erfc', 'exp', 'expm1', 'fabs', 'factorial', 'floor', 'fmod', 'frexp', 'fsum', 'gamma', 'gcd', 'hypot', 'inf', 'isclose', 'isfinite', 'isinf', 'isnan', 'ldexp', 'lgamma', 'log', 'log10', 'log1p', 'log2', 'modf', 'nan', 'pi', 'pow', 'radians', 'remainder', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'tau', 'trunc'] ###Markdown Assertion and Exception ###Code try : p = int(input('input a number:')) assert (p>=0), 'A negative number {} assertion.'.format(p) print('The number you input: ', p) except ValueError as e: print(e) except AssertionError as e: print(e) ###Output input a number:340 The number you input: 340 ###Markdown File access ###Code # Add timestamp in file. import datetime try: ft = open('sample.txt', 'r+') ft.seek(0,2) ft.write('\nNow is {}.'.format(datetime.datetime.now())) ft.seek(0,0) chunk = ft.read() print('file size: {} bytes'.format(ft.tell())) ft.close() except IOError: print('>>> Sorry, file access failed.') else: print(chunk) print('>>> Congrats, file access succeed.') # Initialize file data. 
import datetime try: ft = open('sample.txt', 'w') ft.write('Greets from "sample.txt".') print('file size: {} bytes'.format(ft.tell())) ft.close() except IOError: print('>>> Sorry, file Initialize failed.') else: print('>>> Congrats, file Initialize succeed.') ###Output file size: 25 bytes >>> Congrats, file Initialize succeed. ###Markdown OS Module getcwd() ###Code import os print('Current work directory is ', os.getcwd()) ###Output Current work directory is /home/vincent/pythonws/jnb-sample/test
Sample app/MQA_app_original.ipynb
###Markdown Mortgage Qualifier"""Loan Qualifier Application.This is a command line application to match applicants with mortgage loans.Example: $ mqa.py""" ###Code #import dependencies import sys import fire import questionary from pathlib import Path from qualifier.utils.fileio import ( load_csv, save_csv, ) #import calculators from qualifier.utils.calculators import ( calculate_monthly_debt_ratio, calculate_loan_to_value_ratio, ) #import qualifiers from qualifier.filters.max_loan_size import filter_max_loan_size from qualifier.filters.credit_score import filter_credit_score from qualifier.filters.debt_to_income import filter_debt_to_income from qualifier.filters.loan_to_value import filter_loan_to_value #load bank data def load_bank_data(): """Ask for the file path to the latest banking data and load the CSV file. Returns: The bank data from the data rate sheet CSV file. """ csvpath = "./data/daily_rate_sheet.csv" csvpath = Path(csvpath) if not csvpath.exists(): sys.exit(f"Oops! Can't find this path: {csvpath}") return load_csv(csvpath) #load applicant info def get_applicant_info(): """Prompt dialog to get the applicant's financial information. Returns: Returns the applicant's financial information. """ credit_score = questionary.text("What's your credit score?").ask() debt = questionary.text("What's your current amount of monthly debt?").ask() income = questionary.text("What's your total monthly income?").ask() loan_amount = questionary.text("What's your desired loan amount?").ask() home_value = questionary.text("What's your home value?").ask() credit_score = int(credit_score) debt = float(debt) income = float(income) loan_amount = float(loan_amount) home_value = float(home_value) return credit_score, debt, income, loan_amount, home_value #find qualifying loans def find_qualifying_loans(bank_data, credit_score, debt, income, loan, home_value): """Determine which loans the user qualifies for. 
Loan qualification criteria is based on: - Credit Score - Loan Size - Debit to Income ratio (calculated) - Loan to Value ratio (calculated) Args: bank_data (list): A list of bank data. credit_score (int): The applicant's current credit score. debt (float): The applicant's total monthly debt payments. income (float): The applicant's total monthly income. loan (float): The total loan amount applied for. home_value (float): The estimated home value. Returns: A list of the banks willing to underwrite the loan. """ # Calculate the monthly debt ratio monthly_debt_ratio = calculate_monthly_debt_ratio(debt, income) print(f"The monthly debt to income ratio is {monthly_debt_ratio:.02f}") # Calculate loan to value ratio loan_to_value_ratio = calculate_loan_to_value_ratio(loan, home_value) print(f"The loan to value ratio is {loan_to_value_ratio:.02f}.") # Run qualification filters bank_data_filtered = filter_max_loan_size(loan, bank_data) bank_data_filtered = filter_credit_score(credit_score, bank_data_filtered) bank_data_filtered = filter_debt_to_income(monthly_debt_ratio, bank_data_filtered) bank_data_filtered = filter_loan_to_value(loan_to_value_ratio, bank_data_filtered) print(f"Found {len(bank_data_filtered)} qualifying loans") return bank_data_filtered #save Qualifying loans def save_qualifying_loans(qualifying_loans): """Saves the qualifying loans to a CSV file. Args: qualifying_loans (list of lists): The qualifying bank loans. """ # @TODO: Complete the usability dialog for savings the CSV Files. # YOUR CODE HERE! if len(qualifying_loans) > 0: #Would you like to save? 
save = questionary.confirm("Would you like to save?").ask() if save == True: #csvpath = Path('qualifying_loans.csv') csvpath = questionary.text("Where would you like to save?").ask() save_csv(csvpath, qualifying_loans) print('writing file...') if save == False: sys.exit(f"You chose note to save") return save elif len(qualifying_loans) == 0: sys.exit("Sorry there are no qualifying loans") #run main function def run(): """The main function for running the script.""" # Load the latest Bank data bank_data = load_bank_data() # Get the applicant's information credit_score, debt, income, loan_amount, home_value = get_applicant_info() # Find qualifying loans qualifying_loans = find_qualifying_loans( bank_data, credit_score, debt, income, loan_amount, home_value ) # Save qualifying loans save_qualifying_loans(qualifying_loans) if __name__ == "__main__": fire.Fire(run) ###Output _____no_output_____
Code/Assignment-9/Independent Analysis.ipynb
###Markdown Independent Analysis - Srinivas (handle: thewickedaxe)** PLEASE SCROLL TO THE BOTTOM OF THE NOTEBOOK TO FIND THE QUESTIONS AND THEIR ANSWERS** ** In this notebook we we explore dimensionality reduction with ISOMAP and MDS and their effects on classification** Initial Data Cleaning ###Code # Standard import pandas as pd import numpy as np %matplotlib inline import matplotlib.pyplot as plt # Dimensionality reduction and Clustering from sklearn.decomposition import PCA from sklearn.cluster import KMeans from sklearn.cluster import MeanShift, estimate_bandwidth from sklearn import manifold, datasets from itertools import cycle # Plotting tools and classifiers from matplotlib.colors import ListedColormap from sklearn.linear_model import LogisticRegression from sklearn.cross_validation import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.datasets import make_moons, make_circles, make_classification from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier from sklearn.naive_bayes import GaussianNB from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA from sklearn import cross_validation from sklearn.cross_validation import LeaveOneOut from sklearn.cross_validation import LeavePOut # Let's read the data in and clean it def get_NaNs(df): columns = list(df.columns.get_values()) row_metrics = df.isnull().sum(axis=1) rows_with_na = [] for i, x in enumerate(row_metrics): if x > 0: rows_with_na.append(i) return rows_with_na def remove_NaNs(df): rows_with_na = get_NaNs(df) cleansed_df = df.drop(df.index[rows_with_na], inplace=False) return cleansed_df initial_data = pd.DataFrame.from_csv('Data_Adults_1_reduced.csv') cleansed_df = remove_NaNs(initial_data) # Let's also get rid of nominal data 
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] X = cleansed_df.select_dtypes(include=numerics) print X.shape # Let's now clean columns getting rid of certain columns that might not be important to our analysis cols2drop = ['GROUP_ID', 'doa', 'Baseline_header_id', 'Concentration_header_id', 'Baseline_Reading_id', 'Concentration_Reading_id'] X = X.drop(cols2drop, axis=1, inplace=False) print X.shape # For our studies children skew the data, it would be cleaner to just analyse adults X = X.loc[X['Age'] >= 18] print X.shape ###Output (3926, 322) (3881, 322) ###Markdown we've now dropped the last of the discrete numerical inexplicable data, and removed children from the mix Extracting the samples we are interested in ###Code # Let's extract ADHd and Bipolar patients (mutually exclusive) ADHD = X.loc[X['ADHD'] == 1] ADHD = ADHD.loc[ADHD['Bipolar'] == 0] BP = X.loc[X['Bipolar'] == 1] BP = BP.loc[BP['ADHD'] == 0] print ADHD.shape print BP.shape # Keeping a backup of the data frame object because numpy arrays don't play well with certain scikit functions ADHD_df = ADHD.copy(deep = True) BP_df = BP.copy(deep = True) ADHD = pd.DataFrame(ADHD.drop(['Patient_ID'], axis = 1, inplace = False)) BP = pd.DataFrame(BP.drop(['Patient_ID'], axis = 1, inplace = False)) ###Output (1383, 322) (440, 322) ###Markdown we see here that there 1383 people who have ADHD but are not Bipolar and 440 people who are Bipolar but do not have ADHD Dimensionality reduction PCA ###Code combined = pd.concat([ADHD, BP]) combined_backup = pd.concat([ADHD, BP]) pca = PCA(n_components = 24, whiten = "True").fit(combined) combined = pca.transform(combined) print sum(pca.explained_variance_ratio_) combined = pd.DataFrame(combined) ADHD_reduced_df = combined[:1383] BP_reduced_df = combined[1383:] ADHD_reduced_df_id = ADHD_reduced_df.copy(deep = True) BP_reduced_df_id = BP_reduced_df.copy(deep = True) ADHD_reduced_df_id['Patient_ID'] = 123 BP_reduced_df_id['Patient_ID'] = 123 print 
ADHD_reduced_df.shape print BP_reduced_df.shape print ADHD_reduced_df_id.shape print BP_reduced_df_id.shape # resorting to some hacky crap, that I am ashamed to write, but pandas is refusing to cooperate z = [] for x in BP_df['Patient_ID']: z.append(x) BP_reduced_df_id['Patient_ID'] = z z = [] for x in ADHD_df['Patient_ID']: z.append(x) ADHD_reduced_df_id['Patient_ID'] = z ADHD_pca = ADHD_reduced_df.copy(deep = True) BP_pca = BP_reduced_df.copy(deep = True) ###Output 0.94670018985 (1383, 24) (440, 24) (1383, 25) (440, 25) ###Markdown We see here that most of the variance is preserved with just 24 features. Manifold Techniques ISOMAP ###Code combined = manifold.Isomap(20, 20).fit_transform(combined_backup) ADHD_iso = combined[:1383] BP_iso = combined[1383:] print pd.DataFrame(ADHD_iso).head() ###Output 0 1 2 3 4 5 \ 0 1902.039550 -585.139359 -218.456990 -284.670196 -270.290695 800.963832 1 -1460.620572 760.904059 -145.632148 -316.888910 73.448451 -496.774712 2 -979.903617 -244.839809 287.919386 -809.002862 -93.291661 31.009373 3 2380.896428 -136.396847 1038.059415 94.820461 -8.510901 93.103319 4 3785.806785 -743.303358 -55.820741 532.036809 120.361002 -103.937491 6 7 8 9 10 11 \ 0 -131.229362 -58.954203 -488.117735 290.063022 -233.997831 -18.622278 1 -360.861443 -35.114610 4.569729 182.670645 109.859489 43.898248 2 -322.128221 135.822512 473.378554 302.339227 -101.557987 -486.187360 3 58.787224 44.339368 288.570020 145.449473 273.676975 330.433262 4 -328.883959 -308.048198 -176.178076 206.023323 93.457154 99.003199 12 13 14 15 16 17 \ 0 -56.004988 -32.666850 -246.312982 -118.228148 84.463058 225.965210 1 489.537799 -117.460890 -233.599066 -434.049786 350.592390 219.675481 2 472.590006 -287.829714 259.600410 -187.051557 -397.879062 -278.972686 3 346.786453 484.991693 222.228941 -462.668567 140.125330 170.317520 4 7.753871 -216.860875 11.532852 -425.298647 77.843013 -314.525649 18 19 0 -66.060321 -139.221523 1 216.895233 -52.940637 2 43.510626 56.987645 3 226.482586 
-346.429327 4 -83.576841 391.517919 ###Markdown Multi dimensional scaling ###Code mds = manifold.MDS(20).fit_transform(combined_backup) ADHD_mds = combined[:1383] BP_mds = combined[1383:] print pd.DataFrame(ADHD_mds).head() ###Output 0 1 2 3 4 5 \ 0 1902.039550 -585.139359 -218.456990 -284.670196 -270.290695 800.963832 1 -1460.620572 760.904059 -145.632148 -316.888910 73.448451 -496.774712 2 -979.903617 -244.839809 287.919386 -809.002862 -93.291661 31.009373 3 2380.896428 -136.396847 1038.059415 94.820461 -8.510901 93.103319 4 3785.806785 -743.303358 -55.820741 532.036809 120.361002 -103.937491 6 7 8 9 10 11 \ 0 -131.229362 -58.954203 -488.117735 290.063022 -233.997831 -18.622278 1 -360.861443 -35.114610 4.569729 182.670645 109.859489 43.898248 2 -322.128221 135.822512 473.378554 302.339227 -101.557987 -486.187360 3 58.787224 44.339368 288.570020 145.449473 273.676975 330.433262 4 -328.883959 -308.048198 -176.178076 206.023323 93.457154 99.003199 12 13 14 15 16 17 \ 0 -56.004988 -32.666850 -246.312982 -118.228148 84.463058 225.965210 1 489.537799 -117.460890 -233.599066 -434.049786 350.592390 219.675481 2 472.590006 -287.829714 259.600410 -187.051557 -397.879062 -278.972686 3 346.786453 484.991693 222.228941 -462.668567 140.125330 170.317520 4 7.753871 -216.860875 11.532852 -425.298647 77.843013 -314.525649 18 19 0 -66.060321 -139.221523 1 216.895233 -52.940637 2 43.510626 56.987645 3 226.482586 -346.429327 4 -83.576841 391.517919 ###Markdown As is evident above, the 2 manifold techniques don't really offer very different dimensionality reductions. 
Therefore we are just going to roll with Multi dimensional scaling Clustering and other grouping experiments Mean-Shift - mds ###Code ADHD_clust = pd.DataFrame(ADHD_mds) BP_clust = pd.DataFrame(BP_mds) # This is a consequence of how we dropped columns, I apologize for the hacky code data = pd.concat([ADHD_clust, BP_clust]) # Let's see what happens with Mean Shift clustering bandwidth = estimate_bandwidth(data.get_values(), quantile=0.2, n_samples=1823) * 0.8 ms = MeanShift(bandwidth=bandwidth) ms.fit(data.get_values()) labels = ms.labels_ cluster_centers = ms.cluster_centers_ labels_unique = np.unique(labels) n_clusters_ = len(labels_unique) print('Estimated number of clusters: %d' % n_clusters_) for cluster in range(n_clusters_): ds = data.get_values()[np.where(labels == cluster)] plt.plot(ds[:,0], ds[:,1], '.') lines = plt.plot(cluster_centers[cluster, 0], cluster_centers[cluster, 1], 'o') ###Output _____no_output_____ ###Markdown Though I'm not sure how to tweak the hyper-parameters of the bandwidth estimation function, there doesn't seem to be much difference. Minute variations to the bandwidth result in large cluster differences. Perhaps the data isn't very suitable for a contrived clustering technique like Mean-Shift. 
Therefore let us attempt something more naive and simplistic like K-Means K-Means clustering - mds ###Code kmeans = KMeans(n_clusters=2) kmeans.fit(data.get_values()) labels = kmeans.labels_ centroids = kmeans.cluster_centers_ print('Estimated number of clusters: %d' % len(centroids)) print data.shape for label in [0, 1]: ds = data.get_values()[np.where(labels == label)] plt.plot(ds[:,0], ds[:,1], '.') lines = plt.plot(centroids[i,0], centroids[i,1], 'o') ###Output _____no_output_____ ###Markdown As is evident from the above 2 experiments, no clear clustering is apparent.But there is some significant overlap and there 2 clear groups Classification Experiments Let's experiment with a bunch of classifiers ###Code ADHD_mds = pd.DataFrame(ADHD_mds) BP_mds = pd.DataFrame(BP_mds) BP_mds['ADHD-Bipolar'] = 0 ADHD_mds['ADHD-Bipolar'] = 1 data = pd.concat([ADHD_mds, BP_mds]) class_labels = data['ADHD-Bipolar'] data = data.drop(['ADHD-Bipolar'], axis = 1, inplace = False) print data.shape data = data.get_values() # Leave one Out cross validation def leave_one_out(classifier, values, labels): leave_one_out_validator = LeaveOneOut(len(values)) classifier_metrics = cross_validation.cross_val_score(classifier, values, labels, cv=leave_one_out_validator) accuracy = classifier_metrics.mean() deviation = classifier_metrics.std() return accuracy, deviation p_val = 100 knn = KNeighborsClassifier(n_neighbors = 5) svc = SVC(gamma = 2, C = 1) rf = RandomForestClassifier(n_estimators = 22) dt = DecisionTreeClassifier(max_depth = 22) qda = QDA() gnb = GaussianNB() classifier_accuracy_list = [] classifiers = [(knn, "KNN"), (svc, "SVM"), (rf, "Random Forest"), (dt, "Decision Tree"), (qda, "QDA"), (gnb, "Gaussian NB")] for classifier, name in classifiers: accuracy, deviation = leave_one_out(classifier, data, class_labels) print '%s accuracy is %0.4f (+/- %0.3f)' % (name, accuracy, deviation) classifier_accuracy_list.append((name, accuracy)) ###Output KNN accuracy is 0.7071 (+/- 0.455) SVM 
accuracy is 0.7586 (+/- 0.428) Random Forest accuracy is 0.7301 (+/- 0.444) Decision Tree accuracy is 0.6643 (+/- 0.472) QDA accuracy is 0.6961 (+/- 0.460) Gaussian NB accuracy is 0.7345 (+/- 0.442)
fabric_examples/basic_examples/create_network_l2sts.ipynb
###Markdown This notebook shows how to use Orchestrator APIs for user experiments ###Code import os from fabrictestbed.slice_manager import SliceManager, Status, SliceState import json ssh_key_file_priv=os.environ['HOME']+"/.ssh/id_rsa" ssh_key_file_pub=os.environ['HOME']+"/.ssh/id_rsa.pub" ssh_key_pub = None with open (ssh_key_file_pub, "r") as myfile: ssh_key_pub=myfile.read() ssh_key_pub=ssh_key_pub.strip() credmgr_host = os.environ['FABRIC_CREDMGR_HOST'] print(f"FABRIC Credential Manager : {credmgr_host}") orchestrator_host = os.environ['FABRIC_ORCHESTRATOR_HOST'] print(f"FABRIC Orchestrator : {orchestrator_host}") ###Output _____no_output_____ ###Markdown Create Slice Manager ObjectUsers can request tokens with different Project and Scopes by altering `project_name` and `scope` parameters in the refresh call below. ###Code slice_manager = SliceManager(oc_host=orchestrator_host, cm_host=credmgr_host , project_name='all', scope='all') # Initialize the slice manager slice_manager.initialize() ###Output _____no_output_____ ###Markdown Orchestrator API example to query for available resources ###Code status, advertised_topology = slice_manager.resources() print(f"Status: {status}") if status == Status.OK: print(f"Toplogy: {advertised_topology}") else: print(f"Error: {advertised_topology}") if status == Status.OK: advertised_topology.draw() ###Output _____no_output_____ ###Markdown Create SliceIn Release 1.0, user is expected to create tagged interface and assign the IP addresses manually. 
Please use the example comands indicated below: Configure Slice Parameters ###Code slice_name = 'MySlice' site1 = 'RENC' site2 = 'LBNL' node1_name = 'Node1' node2_name = 'Node2' node3_name = 'Node3' network_service_name='site2site1' nic1_name = 'node1-nic1' nic2_name = 'node2-nic1' nic3_name = 'node3-nic1' image = 'default_centos_8' image_type = 'qcow2' cores = 2 ram = 16 disk = 100 from fabrictestbed.slice_editor import ExperimentTopology, Capacities, ComponentType, ComponentModelType, ServiceType # Create topology t = ExperimentTopology() # Add node n1 = t.add_node(name=node1_name, site=site1) # Set capacities cap = Capacities() cap.set_fields(core=cores, ram=ram, disk=disk) # Set Properties n1.set_properties(capacities=cap, image_type=image_type, image_ref=image) # Add node n2 = t.add_node(name=node2_name, site=site1) # Set properties n2.set_properties(capacities=cap, image_type=image_type, image_ref=image) # Add node n3 = t.add_node(name=node3_name, site=site2) # Set properties n3.set_properties(capacities=cap, image_type=image_type, image_ref=image) # Shared Cards n1.add_component(model_type=ComponentModelType.SmartNIC_ConnectX_6, name=nic1_name) n2.add_component(model_type=ComponentModelType.SmartNIC_ConnectX_6, name=nic2_name) n3.add_component(model_type=ComponentModelType.SmartNIC_ConnectX_6, name=nic3_name) # L2STS Service t.add_network_service(name='sts1', nstype=ServiceType.L2STS, interfaces=[n1.interface_list[0], n2.interface_list[0], n3.interface_list[0]]) # Generate Slice Graph slice_graph = t.serialize() # Request slice from Orchestrator return_status, slice_reservations = slice_manager.create(slice_name=slice_name, slice_graph=slice_graph, ssh_key=ssh_key_pub) if return_status == Status.OK: slice_id = slice_reservations[0].get_slice_id() print("Submitted slice creation request. 
Slice ID: {}".format(slice_id)) else: print(f"Failure: {slice_reservations}") ###Output _____no_output_____ ###Markdown Get the Slice ###Code import time def wait_for_slice(slice,timeout=180,interval=10,progress=False): timeout_start = time.time() if progress: print("Waiting for slice .", end = '') while time.time() < timeout_start + timeout: return_status, slices = slice_manager.slices(excludes=[SliceState.Dead,SliceState.Closing]) if return_status == Status.OK: slice = list(filter(lambda x: x.slice_name == slice_name, slices))[0] if slice.slice_state == "StableOK": if progress: print(" Slice state: {}".format(slice.slice_state)) return slice if slice.slice_state == "Closing" or slice.slice_state == "Dead": if progress: print(" Slice state: {}".format(slice.slice_state)) return slice else: print(f"Failure: {slices}") if progress: print(".", end = '') time.sleep(interval) if time.time() >= timeout_start + timeout: if progress: print(" Timeout exceeded ({} sec). Slice: {} ({})".format(timeout,slice.slice_name,slice.slice_state)) return slice return_status, slices = slice_manager.slices(excludes=[SliceState.Dead,SliceState.Closing]) if return_status == Status.OK: slice = list(filter(lambda x: x.slice_name == slice_name, slices))[0] slice = wait_for_slice(slice, progress=True) print() print("Slice Name : {}".format(slice.slice_name)) print("ID : {}".format(slice.slice_id)) print("State : {}".format(slice.slice_state)) print("Lease End : {}".format(slice.lease_end)) ###Output _____no_output_____ ###Markdown Get the NodesRetrieve the node information and save the management IP address. Get the Topology ###Code return_status, experiment_topology = slice_manager.get_slice_topology(slice_object=slice) ###Output _____no_output_____ ###Markdown Configure Node1Use ssh to configure eth1 on node 1. 
```ip addr add 192.168.10.51/24 dev eth1``` ###Code node1 = experiment_topology.nodes[node1_name] management_ip_node1 = str(node1.get_property(pname='management_ip')) print("Node Name : {}".format(node1.name)) print("Management IP : {}".format(management_ip_node1)) print() import paramiko key = paramiko.RSAKey.from_private_key_file(ssh_key_file_priv) client = paramiko.SSHClient() client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy()) client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) client.connect(management_ip_node1,username='centos',pkey = key) stdin, stdout, stderr = client.exec_command('sudo ip addr add 192.168.10.51/24 dev eth1') stdin, stdout, stderr = client.exec_command('ifconfig eth1') print (str(stdout.read(),'utf-8').replace('\\n','\n')) ###Output _____no_output_____ ###Markdown Configure Node2Use ssh to configure eth1 on each Node 2. ```ip addr add 192.168.10.52/24 dev eth1``` ###Code node2 = experiment_topology.nodes[node2_name] management_ip_node2 = str(node2.get_property(pname='management_ip')) print("Node Name : {}".format(node2.name)) print("Management IP : {}".format(management_ip_node2)) print() import paramiko key = paramiko.RSAKey.from_private_key_file(ssh_key_file_priv) client = paramiko.SSHClient() client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy()) client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) client.connect(management_ip_node2,username='centos',pkey = key) stdin, stdout, stderr = client.exec_command('sudo ip addr add 192.168.10.52/24 dev eth1') stdin, stdout, stderr = client.exec_command('ifconfig eth1') print (str(stdout.read(),'utf-8').replace('\\n','\n')) ###Output _____no_output_____ ###Markdown Configure Node3Use ssh to configure eth1 on node 3. 
```ip addr add 192.168.10.52/24 dev eth1``` ###Code node3 = experiment_topology.nodes[node3_name] management_ip_node3 = str(node3.get_property(pname='management_ip')) print("Node Name : {}".format(node3.name)) print("Management IP : {}".format(management_ip_node3)) print() import paramiko key = paramiko.RSAKey.from_private_key_file(ssh_key_file_priv) client = paramiko.SSHClient() client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy()) client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) client.connect(management_ip_node3,username='centos',pkey = key) stdin, stdout, stderr = client.exec_command('sudo ip addr add 192.168.10.53/24 dev eth1') stdin, stdout, stderr = client.exec_command('ifconfig eth1') print (str(stdout.read(),'utf-8').replace('\\n','\n')) ###Output _____no_output_____ ###Markdown Delete Slice ###Code return_status, result = slice_manager.delete(slice_object=slice) print("Response Status {}".format(return_status)) print("Response received {}".format(result)) ###Output _____no_output_____ ###Markdown This notebook shows how to use Orchestrator APIs for user experiments ###Code import os from fabrictestbed.slice_manager import SliceManager, Status, SliceState import json bastion_public_addr = 'bastion-1.fabric-testbed.net' bastion_private_ipv4_addr = '192.168.11.226' bastion_private_ipv6_addr = '2600:2701:5000:a902::c' bastion_username = '<your bastion username>' bastion_key_filename = os.environ['HOME'] + "/.ssh/id_rsa_fabric" ssh_key_file_priv=os.environ['HOME']+"/.ssh/id_rsa" ssh_key_file_pub=os.environ['HOME']+"/.ssh/id_rsa.pub" ssh_key_pub = None with open (ssh_key_file_pub, "r") as myfile: ssh_key_pub=myfile.read() ssh_key_pub=ssh_key_pub.strip() credmgr_host = os.environ['FABRIC_CREDMGR_HOST'] print(f"FABRIC Credential Manager : {credmgr_host}") orchestrator_host = os.environ['FABRIC_ORCHESTRATOR_HOST'] print(f"FABRIC Orchestrator : {orchestrator_host}") ###Output _____no_output_____ ###Markdown 
Create Slice Manager ObjectUsers can request tokens with different Project and Scopes by altering `project_name` and `scope` parameters in the refresh call below. ###Code slice_manager = SliceManager(oc_host=orchestrator_host, cm_host=credmgr_host , project_name='all', scope='all') # Initialize the slice manager slice_manager.initialize() ###Output _____no_output_____ ###Markdown Orchestrator API example to query for available resources ###Code status, advertised_topology = slice_manager.resources() print(f"Status: {status}") if status == Status.OK: print(f"Toplogy: {advertised_topology}") else: print(f"Error: {advertised_topology}") if status == Status.OK: advertised_topology.draw() ###Output _____no_output_____ ###Markdown Create SliceIn Release 1.0, user is expected to create tagged interface and assign the IP addresses manually. Please use the example comands indicated below: Configure Slice Parameters ###Code slice_name = 'MySlice' site1 = 'MAX' site2 = 'STAR' node1_name = 'Node1' node2_name = 'Node2' node3_name = 'Node3' network_service_name='site2site1' nic1_name = 'node1-nic1' nic2_name = 'node2-nic1' nic3_name = 'node3-nic1' username='centos' image = 'default_centos_8' image_type = 'qcow2' cores = 2 ram = 8 disk = 100 from fabrictestbed.slice_editor import ExperimentTopology, Capacities, ComponentType, ComponentModelType, ServiceType # Create topology t = ExperimentTopology() # Add node n1 = t.add_node(name=node1_name, site=site1) # Set capacities cap = Capacities() cap.set_fields(core=cores, ram=ram, disk=disk) # Set Properties n1.set_properties(capacities=cap, image_type=image_type, image_ref=image) # Add node n2 = t.add_node(name=node2_name, site=site1) # Set properties n2.set_properties(capacities=cap, image_type=image_type, image_ref=image) # Add node n3 = t.add_node(name=node3_name, site=site2) # Set properties n3.set_properties(capacities=cap, image_type=image_type, image_ref=image) # Shared Cards 
n1.add_component(model_type=ComponentModelType.SharedNIC_ConnectX_6, name=nic1_name) n2.add_component(model_type=ComponentModelType.SharedNIC_ConnectX_6, name=nic2_name) n3.add_component(model_type=ComponentModelType.SharedNIC_ConnectX_6, name=nic3_name) # L2STS Service t.add_network_service(name='sts1', nstype=ServiceType.L2STS, interfaces=[n1.interface_list[0], n2.interface_list[0], n3.interface_list[0]]) # Generate Slice Graph slice_graph = t.serialize() # Request slice from Orchestrator return_status, slice_reservations = slice_manager.create(slice_name=slice_name, slice_graph=slice_graph, ssh_key=ssh_key_pub) if return_status == Status.OK: slice_id = slice_reservations[0].get_slice_id() print("Submitted slice creation request. Slice ID: {}".format(slice_id)) else: print(f"Failure: {slice_reservations}") ###Output _____no_output_____ ###Markdown Get the Slice ###Code import time def wait_for_slice(slice,timeout=180,interval=10,progress=False): timeout_start = time.time() if progress: print("Waiting for slice .", end = '') while time.time() < timeout_start + timeout: return_status, slices = slice_manager.slices(excludes=[SliceState.Dead,SliceState.Closing]) if return_status == Status.OK: slice = list(filter(lambda x: x.slice_name == slice_name, slices))[0] if slice.slice_state == "StableOK": if progress: print(" Slice state: {}".format(slice.slice_state)) return slice if slice.slice_state == "Closing" or slice.slice_state == "Dead": if progress: print(" Slice state: {}".format(slice.slice_state)) return slice else: print(f"Failure: {slices}") if progress: print(".", end = '') time.sleep(interval) if time.time() >= timeout_start + timeout: if progress: print(" Timeout exceeded ({} sec). 
Slice: {} ({})".format(timeout,slice.slice_name,slice.slice_state)) return slice return_status, slices = slice_manager.slices(excludes=[SliceState.Dead,SliceState.Closing]) if return_status == Status.OK: slice = list(filter(lambda x: x.slice_name == slice_name, slices))[0] slice = wait_for_slice(slice, progress=True) print() print("Slice Name : {}".format(slice.slice_name)) print("ID : {}".format(slice.slice_id)) print("State : {}".format(slice.slice_state)) print("Lease End : {}".format(slice.lease_end)) ###Output _____no_output_____ ###Markdown Get the NodesRetrieve the node information and save the management IP address. Get the Topology ###Code return_status, experiment_topology = slice_manager.get_slice_topology(slice_object=slice) ###Output _____no_output_____ ###Markdown Configure Node1Use ssh to configure eth1 on node 1. ```ip addr add 192.168.10.51/24 dev eth1``` ###Code node1 = experiment_topology.nodes[node1_name] management_ip_node1 = str(node1.get_property(pname='management_ip')) print("Node Name : {}".format(node1.name)) print("Management IP : {}".format(management_ip_node1)) print() from ipaddress import ip_address, IPv4Address def validIPAddress(IP: str) -> str: try: return "IPv4" if type(ip_address(IP)) is IPv4Address else "IPv6" except ValueError: return "Invalid" import paramiko management_ip = management_ip_node1 key = paramiko.RSAKey.from_private_key_file(ssh_key_file_priv) bastion=paramiko.SSHClient() bastion.set_missing_host_key_policy(paramiko.AutoAddPolicy()) bastion.connect(bastion_public_addr, username=bastion_username, key_filename=bastion_key_filename) bastion_transport = bastion.get_transport() if validIPAddress(management_ip) == 'IPv4': src_addr = (bastion_private_ipv4_addr, 22) elif validIPAddress(management_ip) == 'IPv6': src_addr = (bastion_private_ipv6_addr, 22) else: print('Management IP Invalid: {}'.format(management_ip)) dest_addr = (management_ip, 22) bastion_channel = bastion_transport.open_channel("direct-tcpip", dest_addr, 
src_addr) client = paramiko.SSHClient() client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy()) client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) client.connect(management_ip,username=username,pkey = key, sock=bastion_channel) stdin, stdout, stderr = client.exec_command('sudo ip addr add 192.168.10.51/24 dev eth1') stdin, stdout, stderr = client.exec_command('ifconfig eth1') print (str(stdout.read(),'utf-8').replace('\\n','\n')) ###Output _____no_output_____ ###Markdown Configure Node2Use ssh to configure eth1 on each Node 2. ```ip addr add 192.168.10.52/24 dev eth1``` ###Code node2 = experiment_topology.nodes[node2_name] management_ip_node2 = str(node2.get_property(pname='management_ip')) print("Node Name : {}".format(node2.name)) print("Management IP : {}".format(management_ip_node2)) print() from ipaddress import ip_address, IPv4Address def validIPAddress(IP: str) -> str: try: return "IPv4" if type(ip_address(IP)) is IPv4Address else "IPv6" except ValueError: return "Invalid" import paramiko management_ip = management_ip_node2 key = paramiko.RSAKey.from_private_key_file(ssh_key_file_priv) bastion=paramiko.SSHClient() bastion.set_missing_host_key_policy(paramiko.AutoAddPolicy()) bastion.connect(bastion_public_addr, username=bastion_username, key_filename=bastion_key_filename) bastion_transport = bastion.get_transport() if validIPAddress(management_ip) == 'IPv4': src_addr = (bastion_private_ipv4_addr, 22) elif validIPAddress(management_ip) == 'IPv6': src_addr = (bastion_private_ipv6_addr, 22) else: print('Management IP Invalid: {}'.format(management_ip)) dest_addr = (management_ip, 22) bastion_channel = bastion_transport.open_channel("direct-tcpip", dest_addr, src_addr) client = paramiko.SSHClient() client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy()) client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) client.connect(management_ip,username=username,pkey = 
key, sock=bastion_channel) stdin, stdout, stderr = client.exec_command('sudo ip addr add 192.168.10.52/24 dev eth1') stdin, stdout, stderr = client.exec_command('ifconfig eth1') print (str(stdout.read(),'utf-8').replace('\\n','\n')) ###Output _____no_output_____ ###Markdown Configure Node3Use ssh to configure eth1 on node 3. ```ip addr add 192.168.10.53/24 dev eth1``` ###Code node3 = experiment_topology.nodes[node3_name] management_ip_node3 = str(node3.get_property(pname='management_ip')) print("Node Name : {}".format(node3.name)) print("Management IP : {}".format(management_ip_node3)) print() from ipaddress import ip_address, IPv4Address def validIPAddress(IP: str) -> str: try: return "IPv4" if type(ip_address(IP)) is IPv4Address else "IPv6" except ValueError: return "Invalid" import paramiko management_ip = management_ip_node3 key = paramiko.RSAKey.from_private_key_file(ssh_key_file_priv) bastion=paramiko.SSHClient() bastion.set_missing_host_key_policy(paramiko.AutoAddPolicy()) bastion.connect(bastion_public_addr, username=bastion_username, key_filename=bastion_key_filename) bastion_transport = bastion.get_transport() if validIPAddress(management_ip) == 'IPv4': src_addr = (bastion_private_ipv4_addr, 22) elif validIPAddress(management_ip) == 'IPv6': src_addr = (bastion_private_ipv6_addr, 22) else: print('Management IP Invalid: {}'.format(management_ip)) dest_addr = (management_ip, 22) bastion_channel = bastion_transport.open_channel("direct-tcpip", dest_addr, src_addr) client = paramiko.SSHClient() client.load_system_host_keys() client.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy()) client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) client.connect(management_ip,username=username,pkey = key, sock=bastion_channel) stdin, stdout, stderr = client.exec_command('sudo ip addr add 192.168.10.53/24 dev eth1') stdin, stdout, stderr = client.exec_command('ifconfig eth1') print (str(stdout.read(),'utf-8').replace('\\n','\n')) ###Output
_____no_output_____ ###Markdown Delete Slice ###Code return_status, result = slice_manager.delete(slice_object=slice) print("Response Status {}".format(return_status)) print("Response received {}".format(result)) ###Output _____no_output_____
aPY.ipynb
###Markdown

###Code
import torch
import torch.nn as nn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from torch.autograd import Variable
from copy import deepcopy
from sklearn.preprocessing import normalize
import glob, os


class encoder(nn.Module):
    """Feature encoder: maps a 2048-d visual feature to a 100-d vector,
    interpreted downstream as a 50-d mean plus a 50-d log-variance."""

    def __init__(self):
        super(encoder, self).__init__()
        self.fc1 = torch.nn.Linear(2048, 1000)
        self.fc2 = torch.nn.Linear(1000, 500)
        self.fc3 = torch.nn.Linear(500, 100)
        self.rel = torch.nn.ReLU()

    def forward(self, x):
        # Two ReLU-activated hidden layers; the final projection is linear.
        x = self.rel(self.fc1(x))
        x = self.rel(self.fc2(x))
        return self.fc3(x)


class decoder(nn.Module):
    """Decoder: from a (50 + 64 + 32)-d conditioned latent, reconstructs the
    2048-d feature plus a 32-d label block and a 64-d attribute block."""

    def __init__(self):
        super(decoder, self).__init__()
        self.n_e = 64  # width of the class-attribute block
        self.n_y = 32  # width of the one-hot label block
        self.fc1 = torch.nn.Linear(50 + self.n_e + self.n_y, 500)
        self.fc2 = torch.nn.Linear(500, 1000)
        self.fc3 = torch.nn.Linear(1000, 2048 + 32 + 64)
        self.rel = torch.nn.ReLU()

    def forward(self, x):
        x = self.rel(self.fc1(x))
        x = self.rel(self.fc2(x))
        x = self.fc3(x)
        # Split the joint output into feature / label / attribute parts.
        x_out = x[:, :2048]
        y_out = x[:, 2048: 2048 + 32]
        em_out = x[:, (2048 + 32):]
        return x_out, y_out, em_out


class VAE(nn.Module):
    """Conditional VAE: encode, reparameterize with a pre-drawn noise sample,
    condition the latent on (one-hot label, class attributes), then decode."""

    def __init__(self, eps):
        super(VAE, self).__init__()
        self.en = encoder()
        self.de = decoder()
        self.eps = eps  # fixed N(0, 1) sample reused for reparameterization

    def forward(self, x, one_hot, cls_att):
        h = self.en(x)
        mu = h[:, :50]
        logvar = h[:, 50:]
        std = torch.exp(0.5 * logvar)
        z = mu + self.eps * std
        z1 = torch.cat((z, one_hot), axis=1)
        z1 = torch.cat((z1, cls_att), axis=1)
        return self.de(z1), mu, logvar


class private(nn.Module):
    """One task-private VAE per task (4 tasks total)."""

    def __init__(self, eps):
        super(private, self).__init__()
        self.task = torch.nn.ModuleList()
        self.eps = eps
        for _ in range(4):
            self.task.append(VAE(self.eps))

    def forward(self, x, one_hot, cls_att, task_id):
        return self.task[task_id].forward(x, one_hot, cls_att)


class NET(nn.Module):
    """Container: the per-task private VAEs plus one classification head
    (2048 -> 32 logits) per task."""

    def __init__(self, eps):
        super(NET, self).__init__()
        self.eps = eps
        self.private = private(self.eps)
        self.head = torch.nn.ModuleList()
        for _ in range(4):
            self.head.append(
                nn.Sequential(
                    nn.Linear(2048, 1000),
                    nn.Linear(1000, 500),
                    nn.Linear(500, 32)
                )
            )

    def forward(self, x, one_hot, cls_att, task_id):
        p_out, p_mu, p_logvar = self.private(x, one_hot, cls_att, task_id)
        return self.head[task_id].forward(x), (p_out, p_mu, p_logvar)

    def common_features(self, z, task_id):
        # Decode a latent with the task's private decoder; only the 2048-d
        # feature reconstruction is returned (label/attribute parts dropped).
        x_p, _, _ = self.private.task[task_id].de(z)
        return x_p


# Dataset files are expected under this directory.
path = 'FolderPath'
train_data_path = path + '/trainData'
train_label_path = path + '/trainLabels'
train_attr_path = path + '/trainAttributes'
test_data_path = path + '/testData'
test_label_path = path + '/testLabels'
test_attr_path = path + '/testAttributes'
attributes_path = path + '/dataAttributes'


def dataprocess(data_path):
    """Load a pickled numpy object from disk (latin1 handles py2 pickles)."""
    with open(data_path, 'rb') as fopen:
        contents = np.load(fopen, allow_pickle=True, encoding='latin1')
    return contents


trainData1 = dataprocess(train_data_path)
trainLabels1 = dataprocess(train_label_path)
trainLabelsVectors1 = dataprocess(train_attr_path)
testData1 = dataprocess(test_data_path)
testLabels1 = dataprocess(test_label_path)
testlabelsvectors1 = dataprocess(test_attr_path)
ATTR = dataprocess(attributes_path)


class CLASSIFIER(nn.Module):
    """Softmax classifier trained on (pseudo-)features: 2048 -> 32 logits."""

    def __init__(self):
        super(CLASSIFIER, self).__init__()
        self.fc1 = torch.nn.Linear(2048, 2000)
        self.fc2 = torch.nn.Linear(2000, 200)
        self.fc3 = torch.nn.Linear(200, 32)
        self.drop = nn.Dropout(p=0.2)
        self.rel = torch.nn.ReLU()

    def forward(self, x):
        x = self.rel(self.fc1(x))
        x = self.rel(self.fc2(x))
        x = self.drop(x)
        return self.fc3(x)


from sklearn.preprocessing import normalize
from sklearn.preprocessing import StandardScaler
import random


class CL_VAE():
    """Continual zero-shot learning driver: trains one private VAE + head per
    task, replays generated features for earlier tasks, and evaluates a
    separate softmax classifier on real seen / scaled unseen features."""

    def __init__(self):
        super(CL_VAE, self).__init__()
        self.batch_size = 64
        self.num_classes = 32
        self.build_model()
        self.set_cuda()
        self.criterion = torch.nn.CrossEntropyLoss()
        self.recon = torch.nn.MSELoss()
        self.L1 = torch.nn.MSELoss()  # named L1 but actually MSE (kept as-is)
        # Per-task metric histories, appended to by class_train().
        self.seen_acc = []
        self.unseen_acc = []
        self.hm_acc = []
        self.overall_acc = []

    def build_model(self):
        # One fixed noise sample shared by all the VAEs' reparameterization.
        self.eps = torch.randn(self.batch_size, 50)
        self.eps = self.eps.cuda()
        self.net = NET(self.eps)
        pytorch_total_params = sum(p.numel() for p in self.net.parameters() if p.requires_grad)
        print('pytorch_total_params:', pytorch_total_params)

    def set_cuda(self):
        self.net.cuda()

    def VAE_loss(self, recon, mu, sigma):
        # ELBO: reconstruction term + KL(N(mu, sigma) || N(0, 1)).
        kl_div = -0.5 * torch.sum(1 + sigma - mu.pow(2) - sigma.exp())
        return recon + kl_div

    def train(self, all_traindata, all_trainlabels, all_testdata, all_testlabels,
              all_train_attr, all_test_attr, all_attr, total_tasks):
        """Sequentially train on each task, replaying generated features of
        previous tasks, then evaluate seen/unseen accuracy after each task."""
        replay_classes = []
        for i in range(total_tasks):
            traindata = torch.tensor(all_traindata[i])
            trainlabels = torch.tensor(all_trainlabels[i])
            testdata = torch.tensor(all_testdata[i])
            testlabels = torch.tensor(all_testlabels[i])
            train_attr = torch.tensor(all_train_attr[i], dtype=torch.float32)
            test_attr = torch.tensor(all_test_attr[i])
            attr = torch.tensor(all_attr)
            replay_classes.append(sorted(list(set(trainlabels.numpy().tolist()))))
            if i + 1 == 1:
                # First task: no replay needed.
                self.train_task(traindata.float(), trainlabels, train_attr, i)
            else:
                # Generate replay features for every previous task's classes.
                num_gen_samples = 50
                for m in range(i):
                    replay_trainlabels = []
                    for ii in replay_classes[m]:
                        for j in range(num_gen_samples):
                            replay_trainlabels.append(ii)
                    replay_trainlabels = torch.tensor(replay_trainlabels)
                    replay_trainlabels_onehot = self.one_hot(replay_trainlabels)
                    replay_attr = torch.tensor(attr[replay_trainlabels])
                    labels_attr = torch.cat((replay_trainlabels_onehot, replay_attr), axis=1)
                    z = torch.randn(replay_trainlabels.shape[0], 50)
                    z_one_hot = torch.cat((z, labels_attr), axis=1)
                    z_one_hot = z_one_hot.cuda()
                    replay_data = self.net.common_features(z_one_hot.float(), m).detach().cpu()
                    train_attr = torch.cat((replay_attr, train_attr), axis=0)
                    traindata = torch.cat((replay_data, traindata), axis=0)
                    trainlabels = torch.cat((replay_trainlabels, trainlabels))
                    testdata = torch.cat((testdata, torch.tensor(all_testdata[m])), axis=0)
                    testlabels = torch.cat((testlabels, torch.tensor(all_testlabels[m])))
                self.train_task(traindata.float(), trainlabels, train_attr.float(), i)
            # Collect seen (tasks 0..i) and unseen (tasks i+1..) test splits.
            testdata_unseen = []
            testlabels_unseen = []
            testdata_seen = []
            testlabels_seen = []
            for j in range(i + 1):
                testdata_seen = testdata_seen + all_testdata[j]
                testlabels_seen = testlabels_seen + all_testlabels[j]
            # NOTE(review): relies on j keeping its final value i, so this
            # covers tasks i+1 .. total_tasks-1 exactly once.
            for k in range(j + 1, total_tasks):
                testdata_unseen = testdata_unseen + all_testdata[k]
                testlabels_unseen = testlabels_unseen + all_testlabels[k]
            # Synthesize classifier training data for all classes seen so far
            # plus the not-yet-seen classes (zero-shot side).
            all_labels = sorted(list(set(testlabels_seen))) + sorted(list(set(testlabels_unseen)))
            num_samples = 150
            labels_list = []
            for label in all_labels:
                for l in range(num_samples):
                    labels_list.append(label)
            attr_labels = attr[labels_list]
            labels_list = torch.tensor(labels_list, dtype=torch.int64)
            labels_list_onehot = self.one_hot(labels_list)
            attr_labels_onehot = torch.cat((labels_list_onehot, attr_labels), axis=1)
            noise = torch.randn(len(labels_list), 50)
            noise_others = torch.cat((noise, attr_labels_onehot), axis=1)
            noise_others = noise_others.float().cuda()
            pseudodata = self.net.common_features(noise_others, i)
            test_seen = torch.tensor(testdata_seen)
            testlabels_s = torch.tensor(testlabels_seen)
            testlabels_us = torch.tensor(testlabels_unseen)
            # Fit the scaler on the synthetic data, apply it to the real data.
            scaler = StandardScaler()
            pseudodata = torch.from_numpy(scaler.fit_transform(pseudodata.detach().cpu().numpy())).cuda()
            test_seen = torch.from_numpy(scaler.transform(test_seen.detach().numpy()))
            if i < total_tasks - 1:
                test_unseen = torch.tensor(testdata_unseen)
                test_unseen = torch.from_numpy(scaler.transform(test_unseen.detach().numpy()))
            else:
                # Last task: nothing is unseen any more.
                test_unseen = None
                testlabels_us = None
            self.class_train(i, pseudodata, labels_list.cuda(), test_seen, testlabels_s,
                             test_unseen, testlabels_us)

    def dataloader(self, x, y, attr=None):
        """Shuffle x, y (and optionally attr) with one shared permutation."""
        length = x.size()[0]
        indices = np.arange(length)
        random.shuffle(indices)
        new_x = x[indices]
        new_y = y[indices]
        if attr is not None:
            new_attr = attr[indices]
            return new_x, new_y, new_attr
        else:
            return new_x, new_y

    def class_train(self, task_id, pseudodata, labels_list, test_seen, testlabels_s,
                    test_unseen=None, testlabels_us=None):
        """Train a fresh CLASSIFIER on synthetic features, then record
        per-class seen / unseen / harmonic-mean / overall accuracies."""
        pseudodata, labels_list = self.dataloader(pseudodata, labels_list)
        self.CLASS = CLASSIFIER()
        self.CLASS = self.CLASS.cuda()
        class_opti = torch.optim.Adam(self.CLASS.parameters(), lr=1e-4)
        num_epochs = 25
        batch_s = 64
        num_iter = int(pseudodata.shape[0] / batch_s)
        for e in range(num_epochs):
            for i in range(num_iter):
                self.CLASS.train()
                self.CLASS.zero_grad()
                batch_data = pseudodata[i * batch_s: (i + 1) * batch_s]
                batch_label = labels_list[i * batch_s: (i + 1) * batch_s]
                out = self.CLASS(batch_data)
                loss = self.criterion(out, batch_label)
                loss.backward(retain_graph=True)
                class_opti.step()
        _, pred_s = torch.max(self.CLASS(test_seen.float().cuda()), axis=1)
        if testlabels_us is not None:
            _, pred_us = torch.max(self.CLASS(test_unseen.float().cuda()), axis=1)
            pred_us = pred_us.detach().cpu()
        pred_s = pred_s.detach().cpu()
        correct = {}
        total = {}
        for m in range(self.num_classes):
            correct[m] = 0
            total[m] = 0
        for m in range(test_seen.shape[0]):
            if pred_s[m].item() == testlabels_s[m].item():
                correct[testlabels_s[m].item()] += 1
            # per-class sample count, incremented for every sample
            total[testlabels_s[m].item()] += 1
        acc1 = 0
        acc2 = 0
        num_s = 0
        num_us = 0
        seenclasses = sorted(list(set(testlabels_s.detach().cpu().numpy())))
        for m in seenclasses:
            acc1 += correct[m] / total[m]
            num_s += 1
        acc1 = acc1 / num_s
        self.seen_acc.append(acc1)
        if testlabels_us is not None:
            unseenclasses = sorted(list(set(testlabels_us.detach().cpu().numpy())))
            for m in range(test_unseen.shape[0]):
                if pred_us[m].item() == testlabels_us[m].item():
                    correct[testlabels_us[m].item()] += 1
                total[testlabels_us[m].item()] += 1
            for m in unseenclasses:
                acc2 += correct[m] / total[m]
                num_us += 1
            acc2 = acc2 / num_us
            self.unseen_acc.append(acc2)
            self.hm_acc.append((2 * self.unseen_acc[task_id] * self.seen_acc[task_id])
                               / (self.seen_acc[task_id] + self.unseen_acc[task_id]))
            self.overall_acc.append((len(testlabels_s) * self.seen_acc[task_id]
                                     + len(testlabels_us) * self.unseen_acc[task_id])
                                    / (len(testlabels_s) + len(testlabels_us)))
        print('self.seen_acc:', np.mean(self.seen_acc))
        print('self.unseen_acc:', np.mean(self.unseen_acc))
        print('self.hm_acc:', np.mean(self.hm_acc))

    def one_hot(self, labels):
        """Return a (len(labels), num_classes) one-hot float matrix."""
        matrix = torch.zeros(len(labels), self.num_classes)
        rows = np.arange(len(labels))
        matrix[rows, labels] = 1
        return matrix

    def model_save(self):
        # NOTE(review): self.net_path is never assigned anywhere in this
        # notebook — calling this would raise AttributeError. Kept as-is.
        torch.save(self.net.state_dict(), os.path.join(self.net_path))

    def train_task(self, traindata, trainlabels, train_attr, task_id):
        """Train the task's private VAE and head on (replay + current) data."""
        traindata, trainlabels, train_attr = self.dataloader(traindata, trainlabels, train_attr)
        net_opti = torch.optim.Adam(self.net.parameters(), lr=1e-4)
        num_iterations = int(traindata.shape[0] / self.batch_size)
        num_epochs = 101
        for e in range(num_epochs):
            for i in range(num_iterations):
                self.net.zero_grad()
                self.net.train()
                batch_data = traindata[i * self.batch_size: (i + 1) * self.batch_size]
                batch_label = trainlabels[i * self.batch_size: (i + 1) * self.batch_size]
                batch_train_attr = train_attr[i * self.batch_size: (i + 1) * self.batch_size]
                batch_label_one_hot = self.one_hot(batch_label)
                batch_data = batch_data.cuda()
                batch_label = batch_label.cuda()
                batch_label_one_hot = batch_label_one_hot.cuda()
                batch_train_attr = batch_train_attr.cuda()
                out, private_out = self.net(batch_data, batch_label_one_hot, batch_train_attr, task_id)
                p_out, p_mu, p_logvar = private_out
                p_x, p_y, p_em = p_out
                cross_en_loss = self.criterion(out, batch_label)
                # Computed but not included in all_loss (kept as in original).
                y_loss = self.L1(p_y, batch_label_one_hot)
                em_loss = self.L1(p_em, batch_train_attr)
                p_recon = self.recon(batch_data, p_x)
                p_VAE_loss = self.VAE_loss(p_recon, p_mu, p_logvar)
                all_loss = cross_en_loss + p_VAE_loss
                all_loss.backward(retain_graph=True)
                net_opti.step()


import time

model = CL_VAE()
st = time.time()
model.train(trainData1, trainLabels1, testData1, testLabels1, trainLabelsVectors1,
            testlabelsvectors1, ATTR, 4)
en = time.time()
print("It takes:", en - st, 'seconds')
###Output
_____no_output_____
challenges/ibm-quantum/africa-2021/lab3/lab3.ipynb
###Markdown IBM Quantum Challenge Africa: Quantum Chemistry for HIV Table of Contents| Walk-through ||:-||[Preface](preface)||[Introduction](intro)||[Step 1 : Defining the Molecular Geometry](step_1)||[Step 2 : Calculating the Qubit Hamiltonian](step_2)||[Step 2a: Constructing the Fermionic Hamiltonion](step_3)||[Step 2b: Getting Ready to Convert to a Qubit Hamiltonian](step_2b)||[Step 3 : Setting up the Variational Quantum Eigensolver (VQE)](step_3)||[Step 3a: The V in VQE (i.e. the Variational form, a Trial state)](step_3a)||[Step 3b: The Q in VQE: the Quantum environment](step_3b)||[Step 3c: Initializing VQE](step_3c)||[Step 4 : Solving for the Ground-state](step_4)||||[The HIV Challenge](challenge)||[1. Refining Step 1: Varying the Molecule](refine_step_1)||[2. Refining Step 2: Reducing the quantum workload](refine_step_2)||[3. Refining Step 4: Energy Surface](refine_step_4)||[4. Refining Step 3a](refine_step_3a)||Exercises||[Exercise 3a: Molecular Definition of Macromolecule with Blocking Approach](exercise_3a)||[Exercise 3b: Classical-Quantum Treatment Conceptual Questions (Multiple-Choice)](exercise_3b)||[Exercise 3c: Energy Landscape, To bind or not to bind?](exercise_3c)||[Exercise 3d: The effect of more repetitions](exercise_3d)||[Exercise 3e: Open-ended: Find the best hardware_inspired_trial to minimize the Energy Error for the Macromolecule](exercise_3e)||[Quantum Chemistry Resources](qresource)|Preface**HIV is a virus that has presented an immense challenge for public health, globally**. The ensuing disease dynamics touch on multiple societal dimensions including nutrition, access to health, education and research funding. To compound the difficulties, the virus mutates rapidly with different strains having different geographic footprints. In particular, the HIV-1-C and HIV-2 strains predominate mostly in Africa. Due to disparities in funding, research for treatments of the African strains lags behind other programmes. 
African researchers are striving to address this imbalance and should consider adding the latest technologies such as quantum computing to their toolkits.**Quantum computing promises spectacular improvements in drug-design**. In particular, in order to design new anti-retrovirals it is important to perform **chemical simulations** to confirm that the anti-retroviral binds with the virus protein. Such simulations are notoriously hard and sometimes ineffective on classical supercomputers. Quantum computers promise more accurate simulations allowing for a better drug-design workflow.In detail: anti-retrovirals are drugs that bind with and block a virus protein, called protease, that cleaves virus polyproteins into smaller proteins, ready for packaging. The protease can be thought of as a chemical scissor. The anti-retroviral can be thought of as a sticky obstacle that disrupts the ability of the scissor to cut. With the protease blocked, the virus cannot make more copies of itself.Mutations in the viral protease changes the binding propensity of a given anti-retroviral. Hence, when a mutation occurs and an anti-retroviral no longer binds well, the goal becomes to adjust the anti-retroviral molecule to again bind strongly.**The main goal of this challenge is to explore whether a toy anti-retroviral molecule binds with a toy virus protease.**Along the way, this challenge introduces **state-of-the-art hybrid classical-quantum embedded chemistry modelling** allowing the splitting of the work-load between classical approximations and more accurate quantum calculations.Finally, you need to tweak the setup of the quantum chemistry algorithm (without having to understand the nuts and bolts of quantum computing) to achieve the best performance for ideal quantum computing conditions. 
*A video explaining how HIV infects and how anti-retroviral treatment works*: ###Code from IPython.display import display, YouTubeVideo YouTubeVideo('cSNaBui2IM8') ###Output _____no_output_____ ###Markdown Walk-through: Calculating the Ground-state Energy for the Simplest Molecule in the Universe *Import relevant packages* ###Code from qiskit import Aer from qiskit_nature.drivers import PySCFDriver, UnitsType, Molecule from qiskit_nature.problems.second_quantization.electronic import ElectronicStructureProblem from qiskit_nature.mappers.second_quantization import JordanWignerMapper, BravyiKitaevMapper from qiskit_nature.converters.second_quantization import QubitConverter from qiskit_nature.transformers import ActiveSpaceTransformer from qiskit_nature.algorithms import GroundStateEigensolver, BOPESSampler from qiskit.algorithms import NumPyMinimumEigensolver from qiskit.utils import QuantumInstance from qiskit_nature.circuit.library.ansatzes import UCCSD from qiskit_nature.circuit.library.initial_states import HartreeFock from qiskit.circuit.library import TwoLocal from qiskit.algorithms import VQE from qiskit.algorithms.optimizers import COBYLA from functools import partial as apply_variation_to_atom_pair import numpy as np import matplotlib.pyplot as plt ###Output /opt/conda/lib/python3.8/site-packages/pyscf/lib/misc.py:47: H5pyDeprecationWarning: Using default_file_mode other than 'r' is deprecated. Pass the mode to h5py.File() instead. h5py.get_config().default_file_mode = 'a' ###Markdown IntroductionIn the HIV Challenge, we are tasked with investigating whether the toy anti-retroviral molecule binds with and therefore, disrupts the toy protease molecule. Successful binding is determined by a lower total ground-state energy for the molecules when they are close together (forming a single macromolecule) compared to far apart.Total ground-state energy refers to the sum of the energies concerning the arrangement of the electrons and the nuclei. 
The nuclear energy is easy to calculate classically. It is the energy of the electron distribution (i.e. molecular spin-orbital occupation) that is extremely difficult and requires a quantum computer.We start with a walk-through tutorial, where we calculate the ground-state energy of a simple molecule and leave the more complicated set-up to the challenge section. The ground-state of a molecule in some configuration consists of the locations of the nuclei, together with some distribution of electrons around the nuclei. The nucleus-nucleus, nuclei-electron and electron-electron forces/energy of attraction and repulsion are captured in a matrix called the **Hamiltonian**. Since the nuclei are relatively massive compared to the electrons, they move at a slower time-scale than the electrons. This allows us to split the calculation into two parts: placing the nuclei and calculating the electron distribution, followed by moving the nuclei and recalculating the electron distribution until a minimum total energy distribution is reached: Algorithm: Find_total_ground_statePlace nuclei Repeat until grid completed or no change in total_energy: - calculate electronic ground-state - total_energy = (nuclei repulsion + electronic energy) - move nuclei (either in grid or following gradient)return total_energy In the walk-through, we simply fix the nuclei positions; however, later, in the challenge section, we allow for a varying one-dimensional intermolecular distance between the anti-retroviral and the protease molecules, which represents the anti-retroviral approaching the protease molecule in an attempt to bind. Step 1: Defining the Molecular Geometry For this walk-through, we work with the simplest non-trivial molecule possible: H$_2$, the hydrogen gas molecule.*The first thing to do is to fix the location of each nucleus. 
This is specified as a python list of nuclei, where each nucleus (as a list) contains a string corresponding to the atomic species and its 3D co-ordinates (as another list). We also specify the overall charge, which tells Qiskit to automatically calculate the number of needed electrons to produce that charge:* ###Code hydrogen_molecule = Molecule(geometry= [['H', [0., 0., 0.]], ['H', [0., 0., 0.735]]], charge=0, multiplicity=1) ###Output _____no_output_____ ###Markdown Step 2: Calculating the Qubit Hamiltonian Once nuclei positions are fixed (the nucleus-nucleus forces are temporarily irrelevant), the only part of the Hamiltonian that then needs to be calculated on the quantum computer is the detailed electron-electron interaction. The nuclei-electron and a rough mean field electron-electron interaction can be pre-computed as *allowed molecular orbitals* on a classical computer via the, so called, Hartree-Fock approximation. With these allowed molecular orbitals and their pre-calculated overlaps, Qiskit automatically produces an interacting electron-electron **fermionic molecular-orbital Hamiltonian** (called Second Quantization). The molecular orbital and overlap pre-calculation are provided by classical packages, e.g. PySCF, and connected to Qiskit via a so-called *driver*, in particular, we use the PySCFDriver. Step 2a: Constructing the Fermionic Hamiltonian *We specify the driver to the classical software package that is to be used to calculate the resulting orbitals of the provided molecule after taking into account the nuclei-electron and mean-field interactions. The `basis` option selects the basis set in which the molecular orbitals are to be expanded in.
`sto3g` is the smallest available basis set:* ###Code molecular_hydrogen_orbital_maker = PySCFDriver(molecule=hydrogen_molecule, unit=UnitsType.ANGSTROM, basis='sto3g') ###Output _____no_output_____ ###Markdown *Qiskit provides a helpful Class named the ElectronicStructureProblem, which calls the driver in the right way to construct the molecular orbitals. We initialise ElectronicStructureProblem with the driver (which already has the molecular information stored in it from the previous step):* ###Code hydrogen_fermionic_hamiltonian = ElectronicStructureProblem(molecular_hydrogen_orbital_maker) ###Output _____no_output_____ ###Markdown *Here, we instruct the ElectronicStructureProblem object to go ahead and create the fermionic molecular-orbital Hamiltonian (which gets stored internally):* ###Code hydrogen_fermionic_hamiltonian.second_q_ops() print("Completed running classical package.\nFermionic molecular-orbital Hamiltonian calculated and stored internally.") print("An example of HF info available: Orbital Energies", hydrogen_fermionic_hamiltonian._molecule_data_transformed.orbital_energies) ###Output Completed running classical package. Fermionic molecular-orbital Hamiltonian calculated and stored internally. An example of HF info available: Orbital Energies [-0.58062892 0.67633625] ###Markdown (If this step is not run explicitly, and its outputs are not used in an intermediary step, the final ground_state solving step would run it automatically.) Step 2b: Getting Ready to Convert to a Qubit Hamiltonian Above, *fermionic* is a term to describe the behaviour of electrons (having an anti-symmetric wave-function obeying the Pauli Exclusion principle). In order to use the quantum computer we need to map the electrons (which exhibit fermionic behavior) to the quantum computer's qubits (which have closely related spin behaviour: Pauli Exclusion but not necessarily anti-symmetric). This mapping is a generic process, independent of the driver above. 
There are multiple mapping methods available, each with pros and cons, and constitutes something to experiment with. *For now, we select the simplest qubit mapper/converter called the Jordan-Wigner Mapper:* ###Code map_fermions_to_qubits = QubitConverter(JordanWignerMapper()) # e.g. alternative: # map_fermions_to_qubits = QubitConverter(BravyiKitaevMapper()) ###Output _____no_output_____ ###Markdown (Note, we have just chosen the mapper above, it has not yet been applied to the fermionic Hamiltonian.) Step 3: Setting up the Variational Quantum Eigensolver (VQE)Now that we have defined the molecule and its mapping onto a quantum computer, we need to select an algorithm to solve for the ground state. There are two well-known approaches: Quantum Phase Estimation (QPE) and VQE. The first requires fault-tolerant quantum computers that have not yet been built. The second is suitable for current day, noisy **depth**-restricted quantum computers, because it is a hybrid quantum-classical method with short-depth quantum circuits. By *depth* of the circuit, it suffices to know that quantum computers can only be run for a short while, before noise completely scrambles the results.Therefore, for now, we only explore the VQE method. Furthermore, VQE offers many opportunities to tweak its configuration; thus, as an end-user you gain experience in quantum black-box tweaking. VQE is an algorithm for finding the ground-state of a molecule (or any Hamiltonian in general). It is a hybrid quantum-classical algorithm, which means that the algorithm consists of two interacting stages, a quantum stage and a classical stage. During the quantum stage, a trial molecular state is created on the quantum computer. The trial state is specified by a collection of **parameters** which are provided and adjusted by the classical stage. After the trial state is created, its energy is calculated on the quantum computer (by a few rounds of quantum-classical measurements). 
The result is finally available classically. At this stage, a classical optimization algorithm looks at the previous energy levels and the new energy level and decides how to adjust the trial state parameters. This process repeats until the energy essentially stops decreasing. The output of the whole algorithm is the final set of parameters that produced the winning approximation to the ground-state and its energy level. Step 3a: The V in VQE (i.e. the Variational form, a Trial state)VQE works by 'searching' for the electron orbital occupation distribution with the lowest energy, called the ground-state. The quantum computer is repeatedly used to calculate the energy of the search trial state.The trial state is specified by a collection of (randomly initialized) parameters that move the state around, in our search for the ground-state (we're minimizing the energy cost-function). The form of the 'movement' is something that can be tweaked (i.e., the definition of the structure of the *ansatz*/trial). There are two broad approaches we could follow. The first, let's call it *Chemistry-Inspired Trial-states*, is to use domain knowledge of what we expect the ground-state to look like from a chemistry point of view and build that into our trial state. The second, let's call it *Hardware-Inspired Trial-states*, is to simply try and create trial states that have as wide a reach as possible while taking into account the architecure of the available quantum computers. *Chemistry-Inspired Trial-states*Since chemistry gives us domain-specific prior information (e.g., the number of orbitals and electrons and the actual Hartree-Fock approximation), it makes sense to guide the trial state by baking this knowledge into the form of the trial. 
*From the HF approximation we get the number of orbitals and from that we can calculate the number of spin orbitals:* ###Code hydrogen_molecule_info = hydrogen_fermionic_hamiltonian.molecule_data_transformed num_hydrogen_molecular_orbitals = hydrogen_molecule_info.num_molecular_orbitals num_hydrogen_spin_orbitals = 2 * num_hydrogen_molecular_orbitals ###Output _____no_output_____ ###Markdown *Furthermore, we can also extract the number of electrons (spin up and spin down):* ###Code num_hydrogen_electrons_spin_up_spin_down = (hydrogen_molecule_info.num_alpha, hydrogen_molecule_info.num_beta) ###Output _____no_output_____ ###Markdown *With the number of spin orbitals, the number of electrons able to fill them and the mapping from fermions to qubits, we can construct an initial quantum computing state for our trial state:* ###Code hydrogen_initial_state = HartreeFock(num_hydrogen_spin_orbitals, num_hydrogen_electrons_spin_up_spin_down, map_fermions_to_qubits) ###Output _____no_output_____ ###Markdown *Finally, Qiskit provides a Class (Unitary Coupled Cluster Single and Double excitations, `UCCSD`) that takes the above information and creates a parameterised state inspired by the HF approximation, that can be iteratively adjusted in our attempt to find the ground-state:* ###Code hydrogen_chemistry_inspired_trial = UCCSD(map_fermions_to_qubits, num_hydrogen_electrons_spin_up_spin_down, num_hydrogen_spin_orbitals, initial_state=hydrogen_initial_state) ###Output _____no_output_____ ###Markdown *Hardware-Inspired Trial-states*The problem with the above "chemistry-inspired" trial-states, is that they are quite deep, quickly using up the available depth of current-day quantum computers. A potential solution is to forgo this chemistry knowledge and try to represent arbitrary states with trial states that are easy to prepare and parametrically "move" around on current hardware. 
There are two quantum operations that can be used to try and reach arbitrary states: mixing (our term for *conditional sub-space rotation*) and rotating (*unconditional rotation*). Detailed knowledge of how these operations and their sub-options work are not really needed, especially because it is not immediately obvious which settings produce the best results. Mixing (also called Entanglement maps)There are a set of available mixing strategies, that you may experiment with. This is specified with two arguments, *`entanglement`* (choosing what to mix) and *`entanglement_blocks`* (choosing how to mix):Possible *`entanglement`* values: `'linear'`, `'full'`, `'circular'`, `'sca'`Possible *`entanglement_blocks`* values: `'cz'`, `'cx'`For our purposes, it is acceptable to simply choose the first option for each setting. RotationThere are a set of available *parameterized* rotation strategies. The rotation strategies are specified as a single argument, *`rotation_blocks`*, in the form of a list of any combination of the following possibilities:Possible *`rotation_blocks`*: `'ry'`, `'rx'`,`'rz'`,`'h'`, ...Typically, this is the only place that parameters are introduced in the trial state. One parameter is introduced for every rotation, corresponding to the angle of rotation around the associated axis. (Note, `'h'` does not have any parameters and so can not be selected alone.)Again, for our purposes, an acceptable choice is the first option alone in the list. *Qiskit provides a Class called `TwoLocal` for creating random trial states by local operations only. The number of **rounds** of the local operations is specified by the argument `reps`:* ###Code hardware_inspired_trial = TwoLocal(rotation_blocks = ['ry'], entanglement_blocks = 'cz', entanglement='linear', reps=2) ###Output _____no_output_____ ###Markdown (Note, this trial state does not depend on the molecule.) 
*Just for convenience, let's choose between the two approaches by assigning the choice to a variable:* ###Code hydrogen_trial_state = hydrogen_chemistry_inspired_trial # OR # hydrogen_trial_state = hardware_inspired_trial ###Output _____no_output_____ ###Markdown Step 3b: The Q in VQE: the Quantum environment Since VQE runs on a quantum computer, it needs information about this stage. For testing purposes, this can even be a simulation, both in the form of noise-free or noisy simulations. Ultimately, we would want to run VQE on actual (albeit noisy) quantum hardware and hopefully, in the not-too-distant future, achieve results unattainable classically. For this challenge, let us pursue noise-free simulation only. Noise-Free Simulation*To set up a noise-free simulation:* ###Code noise_free_quantum_environment = QuantumInstance(Aer.get_backend('statevector_simulator')) ###Output _____no_output_____ ###Markdown Step 3c: Initializing VQE Qiskit Nature provides a class called VQE, that implements the VQE algorithm. *It is initialized in a generic way (without reference to the molecule or the Hamiltonian) and requires the two pieces of information from above: the trial state and the quantum environment:* ###Code hydrogen_vqe_solver = VQE(ansatz=hydrogen_trial_state, quantum_instance=noise_free_quantum_environment) ###Output _____no_output_____ ###Markdown (Note, the vqe solver is only tailored to hydrogen if the trial state is the hydrogen_chemistry_inspired_trial.) Step 4: Solving for the Ground-state **Qiskit Nature provides a class called GroundStateEigensolver to calculate the ground-state of a molecule.**This class first gets initialised with information that is independent of any molecule. It can then be applied to specific molecules using the same generic setup.To initialise a GroundStateEigensolver object, we need to provide the two generic algorithmic sub-components from above, the mapping method (Step 2b) and the solving method (Step 3). 
For testing purposes, an alternative to the VQE solver is a classical solver (see numpy_solver below). ###Code hydrogen_ground_state = GroundStateEigensolver(map_fermions_to_qubits, hydrogen_vqe_solver) ###Output _____no_output_____ ###Markdown We are finally ready to solve for the ground-state energy of our molecule.We apply the GroundStateEigensolver to the fermionic Hamiltonian (Step 2a) which has encoded in it the molecule (Step 1). The already specified mapper and VQE solver is then automatically applied for us to produce the ground-state (approximation). ###Code hydrogen_ground_state_info = hydrogen_ground_state.solve(hydrogen_fermionic_hamiltonian) print(hydrogen_ground_state_info) ###Output === GROUND STATE ENERGY === * Electronic ground state energy (Hartree): -1.857275030145 - computed part: -1.857275030145 ~ Nuclear repulsion energy (Hartree): 0.719968994449 > Total ground state energy (Hartree): -1.137306035696 === MEASURED OBSERVABLES === 0: # Particles: 2.000 S: 0.000 S^2: 0.000 M: -0.000 === DIPOLE MOMENTS === ~ Nuclear dipole moment (a.u.): [0.0 0.0 1.3889487] 0: * Electronic dipole moment (a.u.): [0.0 0.0 1.38894841] - computed part: [0.0 0.0 1.38894841] > Dipole moment (a.u.): [0.0 0.0 0.00000029] Total: 0.00000029 (debye): [0.0 0.0 0.00000074] Total: 0.00000074 ###Markdown As you can see, we have calculated the Ground-state energy of the electron distribution: -1.85 HartreeFrom the placement of the nuclei, we are also conveniently given the repulsion energy (a simple classical calculation).Finally, when it comes to the ground-state of the overall molecule it is the total ground state energy that we are trying to minimise.So the next step would be to move the nuclei and recalculate the **total ground state energy** in search of the stable nuclei positions. To end our discussion, let us compare the quantum-calculated energy to an accuracy-equivalent (but slower) classical calculation. 
###Code #Alternative Step 3b numpy_solver = NumPyMinimumEigensolver() #Alternative Step 4 ground_state_classical = GroundStateEigensolver(map_fermions_to_qubits, numpy_solver) hydrogen_ground_state_info_classical = ground_state_classical.solve(hydrogen_fermionic_hamiltonian) #Read the energy from the classical solve (previously this mistakenly read the VQE result, leaving the classical solve unused) hydrogen_energy_classical = hydrogen_ground_state_info_classical.computed_energies[0] print("Ground-state electronic energy (via classical calculations): ", hydrogen_energy_classical, "Hartree") ###Output Ground-state electronic energy (via classical calculations): -1.857275030145182 Hartree ###Markdown The agreement to so many decimal places tells us that, for this particular Hamiltonian, the VQE process is accurately finding the lowest eigenvalue (and interestingly, the ansatz/trial does not fail to capture the ground-state, probably because it spans the entire Hilbert space). However, when comparing to nature or very accurate classical simulations of $H_2$, we find that the energy is only accurate to two decimal places, e.g. total energy VQE: -1.137 Hartree vs highly accurate classical simulation: -1.166 Hartree, which only agrees two decimal places. The reason for this is that in our above treatment there are sources of modelling error including: the placement of nuclei and a number of approximations that come with the Hartree-Fock expansion. For $H_2$ these can be addressed, but ultimately, in general, the more tricky of these sources can never be fully handled because finding the perfect ground-state is QMA-complete, i.e. the quantum version of NP-complete (i.e. 'unsolvable' for certain Hamiltonians). Then again, nature itself is not expected to be finding this perfect ground-state, so future experimentation is needed to see how close a given quantum computing solution approximates nature's solution. Walk-through Finished *** The HIV ChallengeNow that we have completed the walk-through, we frame the challenge as the task to refine steps 1-4 while answering related questions. 1. 
Refining Step 1: Varying the MoleculeIn Step 1, we defined our molecule. For the challenge, we need to firstly define a new molecule, corresponding to our toy protease molecule (the *scissor*) with an approaching toy anti-retroviral (the *blocker*), forming a *macromolecule*. Secondly, we need to instruct Qiskit to vary the approach distance. Let's learn how to do the second step with the familiar hydrogen molecule. *Here is how to specify the type of molecular variation we are interested in (namely, changing the approach distance in absolute steps)*: ###Code molecular_variation = Molecule.absolute_stretching #Other types of molecular variation: #molecular_variation = Molecule.relative_stretching #molecular_variation = Molecule.absolute_bending #molecular_variation = Molecule.relative_bending ###Output _____no_output_____ ###Markdown *Here is how we specify which atoms the variation applies to. The numbers refer to the index of the atom in the geometric definition list. The first atom of the specified atom_pair, is moved closer to the left-alone second atom:* ###Code specific_molecular_variation = apply_variation_to_atom_pair(molecular_variation, atom_pair=(1, 0)) ###Output _____no_output_____ ###Markdown *Finally, here is how we alter the original molecular definition that you have already seen in the walk-through:* ###Code hydrogen_molecule_stretchable = Molecule(geometry= [['H', [0., 0., 0.]], ['H', [0., 0., 0.735]]], charge=0, multiplicity=1, degrees_of_freedom=[specific_molecular_variation]) ###Output _____no_output_____ ###Markdown If we wanted to test that the variation is working, we could manually specify a given amount of variation (Qiskit calls it a *perturbation*) and then see what the new geometry is: ###Code hydrogen_molecule_stretchable.perturbations = [0.1] ###Output _____no_output_____ ###Markdown (If the above were not specified, a perturbation of zero would be assumed, defaulting to the original geometry.) 
###Code hydrogen_molecule_stretchable.geometry ###Output _____no_output_____ ###Markdown Notice how only the second atom of our geometry list (index 1, specified first in the atom_pair) has moved closer to the other atom by the amount we specified. When it comes time to scanning across different approach distances this is very helpfully automated by Qiskit. Specifying the Protease+Anti-retroviral Macromolecule ProteaseA real protease molecule is made up of two polypeptide chains of around one hundred amino-acids in each chain (the two chains are folded together), with neighbouring pairs connected by the so-called *peptide-bond*.For our toy protease molecule, we have decided to take inspiration from this peptide bond since it is the basic building structure holding successive amino acids in proteins together. It is one of the most important factors in determining the chemistry of proteins, including protein folding in general and the HIV protease's cleaving ability, in particular.To simplify the calculations, let us choose to focus on the O=C-N part of molecule. We keep and also add enough hydrogen atoms to try and make the molecule as realistic as possible (indeed, HCONH$_2$, Formamide, is a stable molecule, which, incidentally, is an ionic solvent, so it does "cut" ionic bonds).Making O=C-N our toy protease molecule is an extreme simplification, but nevertheless biologically motivated.Here is our toy protease:```"O": (1.1280, 0.2091, 0.0000)"N": (-1.1878, 0.1791, 0.0000)"C": (0.0598, -0.3882, 0.0000)"H": (-1.3085, 1.1864, 0.0001)"H": (-2.0305, -0.3861, -0.0001)"H": (-0.0014, -1.4883, -0.0001)```Just for fun, you may imagine that this molecule is a pair of scissors, ready to cut the HIV master protein (Gag-Pol polyprotein), in the process of making copies of the HI virus: Anti-retroviralThe anti-retroviral is a molecule that binds with the protease to **inhibit/block the cleaving mechanism**. 
For this challenge, we select a single carbon atom to be our stand-in for the anti-retroviral molecule. MacromoleculeEven though the two molecules are separate in our minds, when they approach, they form a single macro-molecule, with the outer-electrons forming molecular orbitals around all the atoms.As explained in the walk-through, the quantum electronic distribution is calculated for fixed atom positions, thus we have to separately place the atoms. For the first and second task, let us fix the protease's co-ordinates and only vary the anti-retroviral's position along a straight line.We arbitrarily select a line of approach passing through a given point and approaching the nitrogen atom. This "blocking" approach tries to obstruct the scissor from cutting. If it "sticks", it's working and successfully disrupts the duplication efforts of the HIV. Exercise 3a: Molecular Definition of Macromolecule with Blocking ApproachConstruct the molecular definition and molecular variation to represent the anti-retroviral approaching the nitrogen atom, between the "blades": ``` "C": (-0.1805, 1.3955, 0.0000) ``` Write your answer code here: Create a your molecule in the cell below. Make sure to name the molecule `macromolecule`. ###Code ## Add your code here specific_molecular_variation = apply_variation_to_atom_pair(molecular_variation, atom_pair=(6, 1)) macromolecule = Molecule(geometry=[('O',[1.1280, 0.2091, 0.0000]), ('N',[-1.1878, 0.1791, 0.0000]), ('C', [0.0598, -0.3882, 0.0000]), ('H',[-1.3085, 1.1864, 0.0001]), ('H',[-2.0305, -0.3861, -0.0001]), ('H',[-0.0014, -1.4883, -0.0001]), ('C',[-0.1805, 1.3955, 0.0000]),],degrees_of_freedom=[specific_molecular_variation])## ###Output _____no_output_____ ###Markdown To submit your molecule to the grader, run the cell below. ###Code from qc_grader import grade_ex3a grade_ex3a(molecule=macromolecule) ###Output Submitting your answer for ex3/partA. Please wait... Congratulations ๐ŸŽ‰! Your answer is correct and has been submitted. 
###Markdown 2. Refining Step 2: Reducing the quantum workload In Step 2, we constructed the qubit Hamiltonian. If we tried to apply Step 2 and beyond to our macromolecule above, the ground state calculation simulation would fail. The reason is because since we specified a zero charge, Qiskit knows that it must work with 30 (= 2\*6+7+8+3\*1) electrons. After second quantization, this translates into, say, 60 spin-orbitals which requires 60 qubits. 60 qubits is beyond our ability to simulate classically and while there are IBM Quantum systems with more than 60 qubits available, the noise levels are currently too high to produce accurate results when using that many qubits. Thus, for the purpose of this Challenge we need to reduce the number of qubits. Fortunately, this is well-motivated from a chemistry point of view as well: the classical Hartree-Fock approximation for core-electrons is sometimes sufficient to obtain accurate chemical results. Doubly fortunately, Qiskit has just recently been extended to seamlessly allow for users to specify that certain electrons should receive quantum-computing treatment while the remaining electrons should be classically approximated. Even as more qubits come on online, this facility may prove very useful in allowing near-term quantum computers to tackle very large molecules that would otherwise be out of reach. *Therefore, we next demonstrate how to instruct Qiskit to give a certain number of electrons quantum-computing treatment:* ###Code macro_molecular_orbital_maker = PySCFDriver(molecule=macromolecule, unit=UnitsType.ANGSTROM, basis='sto3g') split_into_classical_and_quantum = ActiveSpaceTransformer(num_electrons=2, num_molecular_orbitals=2) macro_fermionic_hamiltonian = ElectronicStructureProblem(macro_molecular_orbital_maker, [split_into_classical_and_quantum]) ###Output _____no_output_____ ###Markdown Above, Qiskit provides a class called **ActiveSpaceTransformer** that takes in two arguments. 
The first is the number of electrons that should receive quantum-computing treatment (selected from the outermost electrons, counting inwards). The second is the number of orbitals to allow those electrons to roam over (around the so-called Fermi level). It is the second number that determines how many qubits are needed. Exercise 3b: Classical-Quantum Treatment Conceptual Questions (Multiple-Choice)Q1: Why does giving quantum treatment to outer electrons of the macromolecule first, make more heuristic sense?```A: Outer electrons have higher binding energies and therefore swing the ground state energy more, therefore requiring quantum treatment.B: Outer electrons exhibit more quantum interference because their orbitals are more spread out.C: Inner core-electrons typically occupy orbitals more straightforwardly, because they mostly orbit a single nucleus and therefore do not lower the energy much by interacting/entangling with outer electrons.```Q2: For a fixed number of quantum-treatment electrons, as you increase the number of orbitals that those electrons roam over (have access to), does the calculated ground-state energy approach the asymptotic energy from above or below?```A: The asymptotic energy is approached from above, because as you increase the possible orbitals that the electrons have access to, the lower the ground state could be.B: The asymptotic energy is approached from below, because as you increase the possible orbitals the more accurate is your simulation, adding energy that was left out before.C: The asymptotic energy is approached from below, because as you increase the possible orbitals that the electrons have access to, the lower the ground state could be.D: The asymptotic energy is approached from above, because as you increase the possible orbitals the more accurate is your simulation, adding energy that was left out before.``` **Uncomment your answers to these multiple choice questions in the code-cell below. 
Run the cell to submit your answers.** ###Code from qc_grader import grade_ex3b ## Q1 # answer_for_ex3b_q1 = 'A' # answer_for_ex3b_q1 = 'B' # answer_for_ex3b_q1 = 'C' ## answer_for_ex3b_q1 = 'C' ## Q2 # answer_for_ex3b_q2 = 'A' # answer_for_ex3b_q2 = 'B' # answer_for_ex3b_q2 = 'C' # answer_for_ex3b_q2 = 'D' ## answer_for_ex3b_q2 = 'A' grade_ex3b(answer_for_ex3b_q1, answer_for_ex3b_q2) ###Output Submitting your answer for ex3/partB. Please wait... Congratulations ๐ŸŽ‰! Your answer is correct and has been submitted. ###Markdown 3. Refining Step 4: Energy Surface In Step 4, we ran the ground_state solver on a given molecule once only and we haven't yet explained how to instruct Qiskit to vary the molecular geometry using the specification introduced above. As explained in the introduction, changing the nuclei positions and comparing the total energy levels, is a method for finding the nuclei arrangement with the lowest energy. If the lowest energy is **not** at "infinity", this corresponds to a "stable" bound state of the molecule at the energy minimum. The energy as a function of atomic separation is thus a crucial object of study. This function is called the **Born-Oppenheimer Potential Energy Surface (BOPES)**. 
Qiskit provides a helpful python Class that manages this process of varying the geometry and repeatedly calling the ground_state solver: **BOPESSampler**.Let's demonstrate BOPESSampler for the hydrogen molecule.*The only steps of the hydrogen molecule walk-through that need to be re-run are Steps 1 and 2a:* ###Code hydrogen_stretchable_molecular_orbital_maker = PySCFDriver(molecule=hydrogen_molecule_stretchable, unit=UnitsType.ANGSTROM, basis='sto3g') hydrogen_stretchable_fermionic_hamiltonian = ElectronicStructureProblem(hydrogen_stretchable_molecular_orbital_maker) ###Output _____no_output_____ ###Markdown *Secondly, here is how to call the sampler:* ###Code energy_surface = BOPESSampler(gss=hydrogen_ground_state, bootstrap=False) # same solver suffices, since the trial is the same perturbation_steps = np.linspace(-0.5, 2, 25) # 25 equally spaced points from -0.5 to 2, inclusive. energy_surface_result = energy_surface.sample(hydrogen_stretchable_fermionic_hamiltonian, perturbation_steps) ###Output /opt/conda/lib/python3.8/site-packages/qiskit_nature/algorithms/pes_samplers/bopes_sampler.py:192: DeprecationWarning: The VQE.optimal_params property is deprecated as of Qiskit Terra 0.18.0 and will be removed no sooner than 3 months after the releasedate. This information is part of the returned result object and can be queried as VQEResult.optimal_point. 
optimal_params = self._gss.solver.optimal_params # type: ignore ###Markdown *Thirdly, here is how to produce the famous energy landscape plot:* ###Code def plot_energy_landscape(energy_surface_result): if len(energy_surface_result.points) > 1: plt.plot(energy_surface_result.points, energy_surface_result.energies, label="VQE Energy") plt.xlabel('Atomic distance Deviation(Angstrom)') plt.ylabel('Energy (hartree)') plt.legend() plt.show() else: print("Total Energy is: ", energy_surface_result.energies[0], "hartree") print("(No need to plot, only one configuration calculated.)") plot_energy_landscape(energy_surface_result) ###Output _____no_output_____ ###Markdown For extra intuition, you may think of the energy landscape as a mountain, next to a valley, next to a plateau that a ball rolls on (the x co-ordinate of the ball corresponds to the separation between the two hydrogen atoms). If the ball is not rolling too fast down the plateau (right to left) it may settle in the valley. The ball slowly rolls down the plateau because the slope is positive (representing a force of attraction between the two hydrogen atoms). If the ball overshoots the minimum point of the valley, it meets the steep negative slope of the mountain and quickly rolls back (the hydrogen atoms repel each other).Notice the minimum is at zero. This is because we defined the hydrogen molecule's nuclei positions at the known ground state positions.By the way, if we had used the hardware_inspired_trial we would have produced a similar plot, however it would have had bumps because the ansatz does not capture the electronic ground state equally well at different bond lengths. Exercise 3c: Energy Landscape, To bind or not to bind?The million-dollar question: Does our toy anti-retroviral bind and thus block the protease? - Search for the minimum from -0.5 to 5 for 30 points. - Give quantum-computing treatment to 2 electrons roaming over 2 orbitalsQ1. 
Submit the energy landscape for the anti-retroviral approaching the protease.Q2. Is there a clear minimum at a finite separation? Does binding occur?```A. Yes, there is a clear minimum at 0, so binding does occur.B. Yes, there is a clear minimum at infinity, so binding only happens at infinity.C. No, there is no clear minimum for any separation, so binding occurs because there is no seperation.D. No, there is no clear minimum for any separation, so there is no binding.```(Don't preempt the answer. Furthermore, the answer might change for other approaches and other settings, so please stick to the requested settings.) *Feel free to use the following function, which collects the entire walk-through and refinements to Step 2 and 4. It takes in a Molecule (of refinement Step 1 type), the inputs for the other refinements and boolean choice of whether to use VQE or the numpy solver:* ###Code def construct_hamiltonian_solve_ground_state( molecule, num_electrons=2, num_molecular_orbitals=2, chemistry_inspired=True, hardware_inspired_trial=None, vqe=True, perturbation_steps=np.linspace(-1, 1, 3), ): """Creates fermionic Hamiltonion and solves for the energy surface. Args: molecule (Union[qiskit_nature.drivers.molecule.Molecule, NoneType]): The molecule to simulate. num_electrons (int, optional): Number of electrons for the `ActiveSpaceTransformer`. Defaults to 2. num_molecular_orbitals (int, optional): Number of electron orbitals for the `ActiveSpaceTransformer`. Defaults to 2. chemistry_inspired (bool, optional): Whether to create a chemistry inspired trial state. `hardware_inspired_trial` must be `None` when used. Defaults to True. hardware_inspired_trial (QuantumCircuit, optional): The hardware inspired trial state to use. `chemistry_inspired` must be False when used. Defaults to None. vqe (bool, optional): Whether to use VQE to calculate the energy surface. Uses `NumPyMinimumEigensolver if False. Defaults to True. 
perturbation_steps (Union(list,numpy.ndarray), optional): The points along the degrees of freedom to evaluate, in this case a distance in angstroms. Defaults to np.linspace(-1, 1, 3). Raises: RuntimeError: `chemistry_inspired` and `hardware_inspired_trial` cannot be used together. Either `chemistry_inspired` is False or `hardware_inspired_trial` is `None`. Returns: qiskit_nature.results.BOPESSamplerResult: The surface energy as a BOPESSamplerResult object. """ # Verify that `chemistry_inspired` and `hardware_inspired_trial` do not conflict if chemistry_inspired and hardware_inspired_trial is not None: raise RuntimeError( ( "chemistry_inspired and hardware_inspired_trial" " cannot both be set. Either chemistry_inspired" " must be False or hardware_inspired_trial must be none." ) ) # Step 1 including refinement, passed in # Step 2a molecular_orbital_maker = PySCFDriver( molecule=molecule, unit=UnitsType.ANGSTROM, basis="sto3g" ) # Refinement to Step 2a split_into_classical_and_quantum = ActiveSpaceTransformer( num_electrons=num_electrons, num_molecular_orbitals=num_molecular_orbitals ) fermionic_hamiltonian = ElectronicStructureProblem( molecular_orbital_maker, [split_into_classical_and_quantum] ) fermionic_hamiltonian.second_q_ops() # Step 2b map_fermions_to_qubits = QubitConverter(JordanWignerMapper()) # Step 3a if chemistry_inspired: molecule_info = fermionic_hamiltonian.molecule_data_transformed num_molecular_orbitals = molecule_info.num_molecular_orbitals num_spin_orbitals = 2 * num_molecular_orbitals num_electrons_spin_up_spin_down = ( molecule_info.num_alpha, molecule_info.num_beta, ) initial_state = HartreeFock( num_spin_orbitals, num_electrons_spin_up_spin_down, map_fermions_to_qubits ) chemistry_inspired_trial = UCCSD( map_fermions_to_qubits, num_electrons_spin_up_spin_down, num_spin_orbitals, initial_state=initial_state, ) trial_state = chemistry_inspired_trial else: if hardware_inspired_trial is None: hardware_inspired_trial = TwoLocal( 
rotation_blocks=["ry"], entanglement_blocks="cz", entanglement="linear", reps=2, ) trial_state = hardware_inspired_trial # Step 3b and alternative if vqe: noise_free_quantum_environment = QuantumInstance(Aer.get_backend('statevector_simulator')) solver = VQE(ansatz=trial_state, quantum_instance=noise_free_quantum_environment) else: solver = NumPyMinimumEigensolver() # Step 4 and alternative ground_state = GroundStateEigensolver(map_fermions_to_qubits, solver) # Refinement to Step 4 energy_surface = BOPESSampler(gss=ground_state, bootstrap=False) energy_surface_result = energy_surface.sample( fermionic_hamiltonian, perturbation_steps ) return energy_surface_result ###Output _____no_output_____ ###Markdown Your answer The following code cells give a skeleton to call `construct_hamiltonian_solve_ground_state` and plot the results. Once you are confident with your results, submit them in the code-cell that follows.**Note: `construct_hamiltonian_solve_ground_state` will take some time to run (approximately 2 minutes). Do not worry if it doesn't return a result immediately.** ###Code # Q1 # Calculate the energies q1_energy_surface_result = construct_hamiltonian_solve_ground_state( molecule=macromolecule, num_electrons=2, num_molecular_orbitals=2, chemistry_inspired=True, vqe=True, perturbation_steps=np.linspace(-0.5, 5, 30), ) # Plot the energies to visualize the results plot_energy_landscape(q1_energy_surface_result) ## Q2 # answer_for_ex3c_q2 = 'A' # answer_for_ex3c_q2 = 'B' # answer_for_ex3c_q2 = 'C' # answer_for_ex3c_q2 = 'D' answer_for_ex3c_q2 = 'D' ###Output _____no_output_____ ###Markdown Once you are happy with the results you have acquired, submit the energies and parameters for `construct_hamiltonian_solve_ground_state` in the following cell. 
Change the values for all parameters, except `energy_surface`, to have the same value that you used in your call of `construct_hamiltonian_solve_ground_state` ###Code from qc_grader import grade_ex3c grade_ex3c( energy_surface=q1_energy_surface_result.energies, molecule=macromolecule, num_electrons=2, num_molecular_orbitals=2, chemistry_inspired=True, hardware_inspired_trial=None, vqe=True, perturbation_steps=np.linspace(-0.5, 5, 30), q2_multiple_choice=answer_for_ex3c_q2 ) ###Output Submitting your answer for ex3/partC. Please wait... Congratulations ๐ŸŽ‰! Your answer is correct and has been submitted. ###Markdown 4. Refining Step 3a The last refinement is a lesson in how black-box tweaking can improve results.In Step 3a, the hardware_inspired_trial is designed to run on actual current-day hardware. Recall this line from the walk-through: ###Code hardware_inspired_trial = TwoLocal(rotation_blocks = ['ry'], entanglement_blocks = 'cz', entanglement='linear', reps=2) ###Output _____no_output_____ ###Markdown Let us get a feel for the `reps` (repetition) parameter. This parameter controls how many rounds of mix and rotate are applied in the trial state. In more detail: there is an initial round of rotations, before mix (often containing no parameters) and another round of rotations are repeated. Certain gates don't generate parameters (e.g. `h`, `cz`). 
Each round of rotations adds an extra set of parameters that the classical optimizer adjusts in the search for the ground state.Let's relook at the simple hydrogen molecule and compute the "ideal" lowest energy electronic energy using the chemistry trial, the numpy solver and a single zero perturbation (i.e., no perturbations): ###Code true_total_energy = construct_hamiltonian_solve_ground_state( molecule=hydrogen_molecule_stretchable, # Step 1 num_electrons=2, # Step 2a num_molecular_orbitals=2, # Step 2a chemistry_inspired=True, # Step 3a vqe=False, # Step 3b perturbation_steps = [0]) # Step 4 plot_energy_landscape(true_total_energy) ###Output Total Energy is: -1.137306035753395 hartree (No need to plot, only one configuration calculated.) ###Markdown We take this as the true value for the rest of our experiment.*Next, select `chemistry_inspired=False`, `vqe=True` and pass in a hardware trial with 1 round*: ###Code hardware_inspired_trial = TwoLocal(rotation_blocks = ['ry'], entanglement_blocks = 'cz', entanglement='linear', reps=1) quantum_calc_total_energy = construct_hamiltonian_solve_ground_state( molecule=hydrogen_molecule_stretchable, # Step 1 num_electrons=2, # Step 2a num_molecular_orbitals=2, # Step 2a chemistry_inspired=False, # Step 3a hardware_inspired_trial=hardware_inspired_trial, # Step 3a vqe=True, # Step 3b perturbation_steps = [0]) # Step 4 plot_energy_landscape(quantum_calc_total_energy) ###Output Total Energy is: -1.1169986828048528 hartree (No need to plot, only one configuration calculated.) 
###Markdown *Notice the difference is small and positive:* ###Code quantum_calc_total_energy.energies[0] - true_total_energy.energies[0] ###Output _____no_output_____ ###Markdown *Let's see how many parameters are used to specify the trial state:* ###Code total_number_of_parameters = len(hardware_inspired_trial._ordered_parameters) print("Total number of adjustable parameters: ", total_number_of_parameters) ###Output Total number of adjustable parameters: 8 ###Markdown Exercise 3d: The effect of more repetitions Q1: Try reps equal to 1 (done for you) and 2 and compare the errors. What happens to the error? Does it increase, decrease, or stay the same?Be aware that: - VQE is a statistical algorithm, so run it a few times before observing the pattern. - Going beyond 2 may not continue the pattern. - Note that `reps` is defined in `TwoLocal`Q2: Check the total number of parameters for reps equal 1 and 2. How many parameters are introduced per round of rotations? Write your answer here: **Enter your answer to the first multiple choice question in the code-cell below and add your answer for Q2. Run the cell to submit your answers.** ###Code from qc_grader import grade_ex3d ## Q1 # answer_for_ex3d_q1 = 'decreases' # answer_for_ex3d_q1 = 'increases' # answer_for_ex3d_q1 = 'stays the same' ## answer_for_ex3d_q1 = 'decreases' ## Q2 answer_for_ex3d_q2 = 4 ## grade_ex3d(answer_for_ex3d_q1, answer_for_ex3d_q2) ###Output Submitting your answer for ex3/partD. Please wait... Congratulations ๐ŸŽ‰! Your answer is correct and has been submitted. ###Markdown Exercise 3e: Open-ended: Find the best hardware_inspired_trial to minimize the Energy Error for the Macromolecule Turning to the macromolecule again. Using, `chemistry_inspired=False`, `vqe=True`, `perturbation_steps = [0]`, a maximum of 8 qubits, and your own hardware_inspired_trial with any combination of options from the walk-through; find the lowest energy. 
Your answer to this exercise includes all parameters passed to `construct_hamiltonian_solve_ground_state` and the result object it returns. This exercise is scored based on how close your computed energy $E_{computed}$ is to the "true" minimum energy of the macromolecule $E_{true}$. This score is calculated as shown below, rounded to the nearest integer. $$\text{score} = -10 \times \log_{10}{\left(\left\lvert{\frac{E_{true} - E_{computed}}{E_{true}}}\right\rvert\right)}$$ Achieving a smaller error in your computed energy will increase your score. For example, if the true energy is -42.141 and you compute -40.0, you would have a score of 13. Use the following code cell to trial different `hardware_inspired_trial`s. ###Code # Modify the following variables num_electrons = 2 num_molecular_orbitals = 2 hardware_inspired_trial = TwoLocal(rotation_blocks = ['ry'], entanglement_blocks = 'cz', entanglement='linear', reps=5) # computed_macromolecule_energy_result = construct_hamiltonian_solve_ground_state( molecule=macromolecule, num_electrons=num_electrons, num_molecular_orbitals=num_molecular_orbitals, chemistry_inspired=False, hardware_inspired_trial=hardware_inspired_trial, vqe=True, perturbation_steps=[0], ) ###Output _____no_output_____ ###Markdown Once you are ready to submit your answer, run the following code cell to have your computed energy scored. You can submit multiple times. ###Code from qc_grader import grade_ex3e grade_ex3e( energy_surface_result=computed_macromolecule_energy_result, molecule=macromolecule, num_electrons=num_electrons, num_molecular_orbitals=num_molecular_orbitals, chemistry_inspired=False, hardware_inspired_trial=hardware_inspired_trial, vqe=True, perturbation_steps=[0], ) ###Output Submitting your answer for ex3/partE. Please wait... Congratulations ๐ŸŽ‰! Your answer is correct and has been submitted. Your score is 72. 
###Markdown ---------------- Quantum Chemistry Resources*Videos*- *Quantum Chemistry I: Obtaining the Qubit Hamiltonian* - https://www.youtube.com/watch?v=2XEjrwWhr88- *Quantum Chemistry II: Finding the Ground States* - https://www.youtube.com/watch?v=_UW6puuGa5E - https://www.youtube.com/watch?v=o4BAOKbcd3o*Tutorials*- https://qiskit.org/documentation/nature/tutorials/01_electronic_structure.html - https://qiskit.org/documentation/nature/tutorials/03_ground_state_solvers.html - https://qiskit.org/documentation/nature/tutorials/05_Sampling_potential_energy_surfaces.html*Code References*- UCCSD : https://qiskit.org/documentation/stubs/qiskit.chemistry.components.variational_forms.UCCSD.html- ActiveSpaceTransformer: https://qiskit.org/documentation/nature/stubs/qiskit_nature.transformers.second_quantization.electronic.ActiveSpaceTransformer.html?highlight=activespacetransformerqiskit_nature.transformers.second_quantization.electronic.ActiveSpaceTransformer Licensing and notes:- All images used, with gratitude, are listed below with their respective licenses: - https://de.wikipedia.org/wiki/Datei:Teppichschere.jpg by CrazyD is licensed under CC BY-SA 3.0 - https://commons.wikimedia.org/wiki/File:The_structure_of_the_immature_HIV-1_capsid_in_intact_virus_particles.png by MarinaVladivostok is licensed under CC0 1.0 - https://commons.wikimedia.org/wiki/File:Peptidformationball.svg by YassineMrabet is licensed under the public domain - The remaining images are either IBM-owned, or hand-generated by the authors of this notebook.- HCONH2 (Formamide) co-ordinates kindly provided by the National Library of Medicine: - `National Center for Biotechnology Information (2021). PubChem Compound Summary for CID 713, Formamide. 
https://pubchem.ncbi.nlm.nih.gov/compound/Formamide.`- For further information about the Pauli exclusion principle: https://en.wikipedia.org/wiki/Pauli_exclusion_principle- We would like to thank collaborators, Prof Yasien and Prof Munro from Wits for extensive assistance.- We would like to thank all the testers and feedback providers for their valuable input. ###Code import qiskit.tools.jupyter %qiskit_version_table %qiskit_copyright ###Output _____no_output_____
Example_pipelines/CGI_example.ipynb
###Markdown Conservation-based Synthetic Lethal Search Introduction Rationale Use-cases:* Prioritize human candidate synthetic lethal interactions based on prior evidence of interaction in yeast SL screens* _de novo_ discovery of SL interactions ApproachThis notebook re-implements the approach outlined in Srivas et al. (2016) Usage:Add genes of interest to "inputGenes" value, then run the next step.Example: inputGenes = "'DDX3X','DICER1','DROSHA','TNFRSF14','TRAF7','TSC1','POLG','FBXO11','PRDM1','RFWD3','AMER1','LZTR1','ATP2B3'" Workflow Overview Datasets Yeast Synthetic Lethal InteractionsCostanzo et al. (2016) Human to Yeast Ortholog Mappingdetailed treatment in the accompanying notebook (Mapping human to yeast orthologs) Human Tumor Suppressor Genes References* Costanzo M, VanderSluis B, Koch EN, Baryshnikova A, Pons C, Tan G, Wang W, Usaj M, Hanchard J, Lee SD, Pelechano V, Styles EB, Billmann M, van Leeuwen J, van Dyk N, Lin ZY, Kuzmin E, Nelson J, Piotrowski JS, Srikumar T, Bahr S, Chen Y, Deshpande R, Kurat CF, Li SC, Li Z, Usaj MM, Okada H, Pascoe N, San Luis BJ, Sharifpoor S, Shuteriqi E, Simpkins SW, Snider J, Suresh HG, Tan Y, Zhu H, Malod-Dognin N, Janjic V, Przulj N, Troyanskaya OG, Stagljar I, Xia T, Ohya Y, Gingras AC, Raught B, Boutros M, Steinmetz LM, Moore CL, Rosebrock AP, Caudy AA, Myers CL, Andrews B, Boone C. **A global genetic interaction network maps a wiring diagram of cellular function.** Science. 2016 Sep 23;353(6306). pii: aaf1420. PubMed PMID: 27708008; PubMed Central PMCID: PMC5661885.* Srivas R, Shen JP, Yang CC, Sun SM, Li J, Gross AM, Jensen J, Licon K, Bojorquez-Gomez A, Klepper K, Huang J, Pekin D, Xu JL, Yeerna H, Sivaganesh V, Kollenstart L, van Attikum H, Aza-Blanc P, Sobol RW, Ideker T. **A Network of Conserved Synthetic Lethal Interactions for Exploration of Precision Cancer Therapy**. Mol Cell. 2016 Aug 4;63(3):514-25. doi:10.1016/j.molcel.2016.06.022.Epub 2016 Jul 21. 
PubMed PMID: 27453043; PubMed Central PMCID: PMC5209245. PreambleThis section describes how to set up the analysis environment appropriately, including google cloud platform authentication and importing all the relevant python libraries. Setup Analysis Environment ###Code # This code block installs the dependencies, please run it only once, the first time you run this notebook !pip3 install google-cloud-bigquery !pip3 install matplotlib !pip3 install plotly # google cloud authentication from google.cloud import bigquery # import modules import sys import matplotlib.pyplot as plt import pandas as pd import scipy from scipy import stats import numpy as np import json import statsmodels.stats.multitest as multi import matplotlib.pyplot as plt import math import ipywidgets as widgets import plotly import plotly.express as px import pyarrow # users need to run the following command in their local machine or through the notebook. # Make sure to install the google cloud SDK in the local environment. For more detail of gcloud installation, please see support from https://cloud.google.com/sdk/docs/install !gcloud auth application-default login # Choose the project to be used for bigquery project_id='syntheticlethality' client = bigquery.Client(project_id) # Replace XXXXXXXX with your project ID %load_ext google.cloud.bigquery ###Output _____no_output_____ ###Markdown Define a set of cancer-relevant tumor suppressor genes (TSGs)In this workflow, the search for relevant synthetic lethal interactions is seeded by defining a set of tumor suppressor genes (TSGs) of interest. There are various strategies for obtaining such a list, here we give an example of mining the [COSMIC Cancer Gene Census](https://cancer.sanger.ac.uk/census) for TSG annotations and then prioritizing the list based on driver status or frequency of alteration in a cancer type of interest.If you want to get the SL interactions for genes of interest, please add the genes to "inputGenes". 
###Code query = ''' SELECT * FROM `isb-cgc.COSMIC_v90_grch38.Cancer_Gene_Census` WHERE Role_in_Cancer = "TSG" ''' tsg = client.query(query).result().to_dataframe() tsg.head() # generate a list for inputGenes (Please go to the next block if you want to use your genes instead of tumor suppressor genes) tumor_suppressor_genes = tsg["Gene_Symbol"].tolist() inputGenes = ["'"+x+"'" for x in tumor_suppressor_genes] inputGenes = ','.join(inputGenes) inputGenes # please skip this block if you want to keep using tumor suppressor genes as an input #inputGenes = "" ###Output _____no_output_____ ###Markdown Map Yeast Orthologs & Get SL insteractions ###Code sql = ''' WITH --- Retreive YeastSymbols mapped to HumanSymbols for the input genes INPUT_H2Y AS ( SELECT YeastSymbol FROM `syntheticlethality.gene_information.human2Yeast` WHERE HumanSymbol IN (__INPUTGENES__) AND AlgorithmsMatch >= __ALGORITHMCUTOFF__ ), --- Identify protein-protein interactions using the YeastSymbols (left match) Yeast_ITX1 AS ( SELECT UPPER(Query_allele_name) AS Interactor1, UPPER(Array_allele_name) AS Interactor2, Genetic_interaction_score_____ AS Interaction_score, P_value FROM `syntheticlethality.CellMap.CellMap` WHERE (Genetic_interaction_score_____ < __SCORECUTOFF__ AND P_value < __PvalueCUTOFF__) AND (UPPER(Query_allele_name) IN (SELECT YeastSymbol FROM INPUT_H2Y)) ), --- Identify protein-protein interactions using the YeastSymbols (right match) Yeast_ITX2 AS ( SELECT UPPER(Array_allele_name) AS Interactor1, UPPER(Query_allele_name) AS Interactor2, Genetic_interaction_score_____ AS Interaction_score, P_value FROM `syntheticlethality.CellMap.CellMap` WHERE (Genetic_interaction_score_____ < __SCORECUTOFF__ AND P_value < __PvalueCUTOFF__) AND (UPPER(Array_allele_name) IN (SELECT YeastSymbol FROM INPUT_H2Y)) ), --- Union interaction tables Union_ITX AS ( SELECT * FROM Yeast_ITX1 UNION ALL SELECT * FROM Yeast_ITX2 ) --- Convert YeastSymbols to HumanSymbols in the protein-protein interations SELECT 
DISTINCT GINFO1.EntrezID AS EntrezID_Input, H2Y1.HumanSymbol AS Gene_Input, --- Add if you want to know what yeast genes are involved --- YITX.Interactor1 AS Gene_Input_Yeast, GINFO2.EntrezID AS EntrezID_SL_Candidate, H2Y2.HumanSymbol AS Gene_SL_Candidate, --- Add if you want to know what yeast genes are involved --- YITX.Interactor2 AS Gene_SL_Candidate_Yeast, YITX.Interaction_score AS Interaction_score, YITX.P_value AS P_value FROM Union_ITX AS YITX LEFT JOIN `syntheticlethality.gene_information.human2Yeast` AS H2Y1 ON YITX.Interactor1 = H2Y1.YeastSymbol LEFT JOIN `syntheticlethality.gene_information.human2Yeast` AS H2Y2 ON YITX.Interactor2 = H2Y2.YeastSymbol LEFT JOIN `syntheticlethality.gene_information.gene_info_human_HGNC` AS GINFO1 ON H2Y1.HumanID = GINFO1.HGNCID LEFT JOIN `syntheticlethality.gene_information.gene_info_human_HGNC` AS GINFO2 ON H2Y2.HumanID = GINFO2.HGNCID WHERE (H2Y1.HumanSymbol IS NOT NULL AND YITX.Interactor1 IS NOT NULL) AND (H2Y2.HumanSymbol IS NOT NULL AND YITX.Interactor2 IS NOT NULL) ''' # select the thresholds to be used cutoff_algorithmMatchNo = "3" cutoff_score = "-0.35" cutoff_p = "0.01" sql = sql.replace("__INPUTGENES__", inputGenes) sql = sql.replace("__ALGORITHMCUTOFF__", cutoff_algorithmMatchNo) sql = sql.replace("__SCORECUTOFF__", cutoff_score) sql = sql.replace("__PvalueCUTOFF__", cutoff_p) res = client.query(sql).to_dataframe() ###Output _____no_output_____ ###Markdown Get Yeast SL Interactions ###Code # shore the SL partner genes for the input genes res ###Output _____no_output_____ ###Markdown Write to file & bigQuery Table ###Code res.to_csv(path_or_buf='conserved_SL_output.csv', index=False) ###Output _____no_output_____ ###Markdown Conservation-based Synthetic Lethal Pair Search ```Title: Conservation-based Synthetic Lethal Pair Search Authors: Taek-Kyun Kim Created: 02-07-2022 Purpose: Retrieve Synthetic Lethal Partners of Genes in a Given List Using Yeast Screen and Human-yeast Homology Information Notes: Runs in 
MyBinder ``` Introduction Rationale Use-cases:* Identify candidate synthetic lethal gene interactions for research prioritization based on prior evidence of interaction in yeast SL screens* _de novo_ discovery of SL interactions ApproachThis notebook re-implements the approach outlined in Srivas et al. (2016) Usage:Add genes of interest to "inputGenes" variable, then run the next step. Workflow Overview Datasets Yeast Synthetic Lethal InteractionsConstanzo et al. (2016) Human to Yeast Ortholog MappingDetailed examples of the methodology used can be found in the accompanying notebook (Mapping human to yeast orthologs) References* Costanzo M, VanderSluis B, Koch EN, Baryshnikova A, Pons C, Tan G, Wang W, Usaj M, Hanchard J, Lee SD, Pelechano V, Styles EB, Billmann M, van Leeuwen J, van Dyk N, Lin ZY, Kuzmin E, Nelson J, Piotrowski JS, Srikumar T, Bahr S, Chen Y, Deshpande R, Kurat CF, Li SC, Li Z, Usaj MM, Okada H, Pascoe N, San Luis BJ, Sharifpoor S, Shuteriqi E, Simpkins SW, Snider J, Suresh HG, Tan Y, Zhu H, Malod-Dognin N, Janjic V, Przulj N, Troyanskaya OG, Stagljar I, Xia T, Ohya Y, Gingras AC, Raught B, Boutros M, Steinmetz LM, Moore CL, Rosebrock AP, Caudy AA, Myers CL, Andrews B, Boone C. **A global genetic interaction network maps a wiring diagram of cellular function.** Science. 2016 Sep 23;353(6306). pii: aaf1420. PubMed PMID: 27708008; PubMed Central PMCID: PMC5661885.* Srivas R, Shen JP, Yang CC, Sun SM, Li J, Gross AM, Jensen J, Licon K, Bojorquez-Gomez A, Klepper K, Huang J, Pekin D, Xu JL, Yeerna H, Sivaganesh V, Kollenstart L, van Attikum H, Aza-Blanc P, Sobol RW, Ideker T. **A Network of Conserved Synthetic Lethal Interactions for Exploration of Precision Cancer Therapy**. Mol Cell. 2016 Aug 4;63(3):514-25. doi:10.1016/j.molcel.2016.06.022.Epub 2016 Jul 21. PubMed PMID: 27453043; PubMed Central PMCID: PMC5209245. 
PreambleThis section describes how to setup the analysis environment, including google cloud platform authentication and import of the necessary python libraries. Setup Analysis Environment ###Code # This code block installs the dependencies, please run it only once, the first time you run this notebook !pip3 install google-cloud-bigquery !pip3 install matplotlib !pip3 install plotly !pip3 install scipy !pip3 install statsmodels !pip3 install ipywidgets # import modules from google.cloud import bigquery import sys import matplotlib.pyplot as plt import pandas as pd import scipy from scipy import stats import numpy as np import json import statsmodels.stats.multitest as multi import matplotlib.pyplot as plt import math import ipywidgets as widgets import plotly import plotly.express as px import pyarrow ###Output _____no_output_____ ###Markdown Google AuthenticationRunning the BigQuery cells in this notebook requires a Google Cloud Project, instructions for creating a project can be found in the [Google Documentation](https://cloud.google.com/resource-manager/docs/creating-managing-projectsconsole). The instance needs to be authorized to bill the project for queries.For more information on getting started in the cloud see ['Quick Start Guide to ISB-CGC'](https://isb-cancer-genomics-cloud.readthedocs.io/en/latest/sections/HowToGetStartedonISB-CGC.html) and alternative authentication methods can be found in the [Google Documentation](https://googleapis.dev/python/google-api-core/latest/auth.html). ###Code # Users need to run the following commend in their local machine or through the notebook. # Make sure to install the google cloud SDK in the local environment. 
For more detailed instructions for gcloud installation, see support at https://cloud.google.com/sdk/docs/install !gcloud auth application-default login # Choose the project to be used for bigquery project_id='syntheticlethality' client = bigquery.Client(project_id) # Replace XXXXXXXX with your project ID %load_ext google.cloud.bigquery # a list for input genes inputGenes = ["DDX3X","DICER1","DROSHA","TNFRSF14","TRAF7","TSC1",'POLG', "FBXO11","PRDM1","RFWD3","AMER1","LZTR1","ATP2B3"] inputGenes = ["'"+x+"'" for x in inputGenes] inputGenes = ','.join(inputGenes) inputGenes ###Output _____no_output_____ ###Markdown Map Yeast Orthologs & Get SL interactions To identify genetic interactions the colony growth of double mutant strains is compared to that of single mutant strains. A genetic interaction in this dataset is defined by the growth of a double mutant colony being higher or lower than the expected growth predicted from the two corresponding single mutant colonies. These positive or negative genetic interactions are quantified by a fitness metric or genetic interaction score.Synthetic lethal interactions are defined as genetic interactions with negative scores (< -0.35) at the extreme end of the distribution. Yeast genes are mapped to human genes using yeast-human orthology and we presume that synthetic lethal pairs have a high likelihood of being maintained across species. The configurable parameters are listed as follows. Find the synthetic lethal partners of the genes in the given list.**Parameters****cutoff_algorithmMatchNo** is the desired minimum matching threshold required for a yeast-human gene comparison to be considered an ortholog.**cutoff_score** The desired cutoff of the quantitative fitness metric. The default setting is (< -0.35), corresponding to the left tail of the distribution.**cutoff_p** the desired significance threshold; the default value is p < 0.01. 
###Code sql = ''' WITH --- Retrieve YeastSymbols mapped to HumanSymbols for the input genes INPUT_H2Y AS ( SELECT YeastSymbol FROM `isb-cgc-bq.annotations_versioned.Human2Yeast_mapping_Alliance_for_Genome_Resources_R3_0_1` WHERE HumanSymbol IN (__INPUTGENES__) AND AlgorithmsMatch >= __ALGORITHMCUTOFF__ ), --- Identify protein-protein interactions using the YeastSymbols (left match) Yeast_ITX1 AS ( SELECT UPPER(Query_allele_name) AS Interactor1, UPPER(Array_allele_name) AS Interactor2, Genetic_interaction_score_____ AS Interaction_score, P_value FROM `isb-cgc-bq.supplementary_tables.Constanzo_etal_Science_2016_SGA_Genetic_Interactions` WHERE (Genetic_interaction_score_____ < __SCORECUTOFF__ AND P_value < __PvalueCUTOFF__) AND (UPPER(Query_allele_name) IN (SELECT YeastSymbol FROM INPUT_H2Y)) ), --- Identify protein-protein interactions using the YeastSymbols (right match) Yeast_ITX2 AS ( SELECT UPPER(Array_allele_name) AS Interactor1, UPPER(Query_allele_name) AS Interactor2, Genetic_interaction_score_____ AS Interaction_score, P_value FROM `isb-cgc-bq.supplementary_tables.Constanzo_etal_Science_2016_SGA_Genetic_Interactions` WHERE (Genetic_interaction_score_____ < __SCORECUTOFF__ AND P_value < __PvalueCUTOFF__) AND (UPPER(Array_allele_name) IN (SELECT YeastSymbol FROM INPUT_H2Y)) ), --- Union interaction tables Union_ITX AS ( SELECT * FROM Yeast_ITX1 UNION ALL SELECT * FROM Yeast_ITX2 ) --- Convert YeastSymbols to HumanSymbols in the protein-protein interations SELECT DISTINCT GINFO1.EntrezID AS EntrezID_Input, H2Y1.HumanSymbol AS Gene_Input, --- Add if you want to know what yeast genes are involved --- YITX.Interactor1 AS Gene_Input_Yeast, GINFO2.EntrezID AS EntrezID_SL_Candidate, H2Y2.HumanSymbol AS Gene_SL_Candidate, --- Add if you want to know what yeast genes are involved --- YITX.Interactor2 AS Gene_SL_Candidate_Yeast, YITX.Interaction_score AS Interaction_score, YITX.P_value AS P_value FROM Union_ITX AS YITX LEFT JOIN 
`isb-cgc-bq.annotations_versioned.Human2Yeast_mapping_Alliance_for_Genome_Resources_R3_0_1` AS H2Y1 ON YITX.Interactor1 = H2Y1.YeastSymbol LEFT JOIN `isb-cgc-bq.annotations_versioned.Human2Yeast_mapping_Alliance_for_Genome_Resources_R3_0_1` AS H2Y2 ON YITX.Interactor2 = H2Y2.YeastSymbol LEFT JOIN `isb-cgc-bq.synthetic_lethality.gene_info_human_HGNC_NCBI_2020_07` AS GINFO1 ON H2Y1.HumanID = GINFO1.HGNCID LEFT JOIN `isb-cgc-bq.synthetic_lethality.gene_info_human_HGNC_NCBI_2020_07` AS GINFO2 ON H2Y2.HumanID = GINFO2.HGNCID WHERE (H2Y1.HumanSymbol IS NOT NULL AND YITX.Interactor1 IS NOT NULL) AND (H2Y2.HumanSymbol IS NOT NULL AND YITX.Interactor2 IS NOT NULL) ''' # select the thresholds to be used cutoff_algorithmMatchNo = "3" cutoff_score = "-0.35" cutoff_p = "0.01" sql = sql.replace("__INPUTGENES__", inputGenes) sql = sql.replace("__ALGORITHMCUTOFF__", cutoff_algorithmMatchNo) sql = sql.replace("__SCORECUTOFF__", cutoff_score) sql = sql.replace("__PvalueCUTOFF__", cutoff_p) res = client.query(sql).to_dataframe() ###Output _____no_output_____ ###Markdown Get Yeast SL Interactions ###Code # List the SL partner genes for the input genes res ###Output _____no_output_____ ###Markdown **Gene_Input** the input gene symbol. **EntrezID_Input** shows the Entrez ids of the genes in the user's input gene list **EntrezID_SL_Candidate and Gene_SL_Candidate** the Entrez ids and gene symbols for the inferred synthetic lethal partners. **Interaction_score and P_value** the estimate of interaction strength between input gene and its SL partner in the isb-cgc-bq.supplementary_tables.Constanzo_etal_Science_2016_SGA_Genetic_Interactions table. The results can be saved to a csv file. ###Code res.to_csv(path_or_buf='conserved_SL_output.csv', index=False) ###Output _____no_output_____
04-Milestone Project - 1/04-OPTIONAL -Milestone Project 1 - Advanced Solution.ipynb
###Markdown Tic Tac Toe - Advanced SolutionThis solution follows the same basic format as the Complete Walkthrough Solution, but takes advantage of some of the more advanced statements we have learned. Feel free to download the notebook to understand how it works! ###Code # Specifically for the iPython Notebook environment for clearing output from IPython.display import clear_output import random # Global variables theBoard = [' '] * 10 # a list of empty spaces available = [str(num) for num in range(0,10)] # a List Comprehension players = [0,'X','O'] # note that players[1] == 'X' and players[-1] == 'O' def display_board(a,b): print('Available TIC-TAC-TOE\n'+ ' moves\n\n '+ a[7]+'|'+a[8]+'|'+a[9]+' '+b[7]+'|'+b[8]+'|'+b[9]+'\n '+ '----- -----\n '+ a[4]+'|'+a[5]+'|'+a[6]+' '+b[4]+'|'+b[5]+'|'+b[6]+'\n '+ '----- -----\n '+ a[1]+'|'+a[2]+'|'+a[3]+' '+b[1]+'|'+b[2]+'|'+b[3]+'\n') display_board(available,theBoard) def display_board(a,b): print(f'Available TIC-TAC-TOE\n moves\n\n {a[7]}|{a[8]}|{a[9]} {b[7]}|{b[8]}|{b[9]}\n ----- -----\n {a[4]}|{a[5]}|{a[6]} {b[4]}|{b[5]}|{b[6]}\n ----- -----\n {a[1]}|{a[2]}|{a[3]} {b[1]}|{b[2]}|{b[3]}\n') display_board(available,theBoard) def place_marker(avail,board,marker,position): board[position] = marker avail[position] = ' ' def win_check(board,mark): return ((board[7] == board[8] == board[9] == mark) or # across the top (board[4] == board[5] == board[6] == mark) or # across the middle (board[1] == board[2] == board[3] == mark) or # across the bottom (board[7] == board[4] == board[1] == mark) or # down the middle (board[8] == board[5] == board[2] == mark) or # down the middle (board[9] == board[6] == board[3] == mark) or # down the right side (board[7] == board[5] == board[3] == mark) or # diagonal (board[9] == board[5] == board[1] == mark)) # diagonal def random_player(): return random.choice((-1, 1)) def space_check(board,position): return board[position] == ' ' def full_board_check(board): return ' ' not in board[1:] def 
player_choice(board,player): position = 0 while position not in [1,2,3,4,5,6,7,8,9] or not space_check(board, position): try: position = int(input('Player %s, choose your next position: (1-9) '%(player))) except: print("I'm sorry, please try again.") return position def replay(): return input('Do you want to play again? Enter Yes or No: ').lower().startswith('y') while True: clear_output() print('Welcome to Tic Tac Toe!') toggle = random_player() player = players[toggle] print('For this round, Player %s will go first!' %(player)) game_on = True input('Hit Enter to continue') while game_on: display_board(available,theBoard) position = player_choice(theBoard,player) place_marker(available,theBoard,player,position) if win_check(theBoard, player): display_board(available,theBoard) print('Congratulations! Player '+player+' wins!') game_on = False else: if full_board_check(theBoard): display_board(available,theBoard) print('The game is a draw!') break else: toggle *= -1 player = players[toggle] clear_output() # reset the board and available moves list theBoard = [' '] * 10 available = [str(num) for num in range(0,10)] if not replay(): break ###Output Welcome to Tic Tac Toe! For this round, Player X will go first! ###Markdown Tic Tac Toe - Advanced SolutionThis solution follows the same basic format as the Complete Walkthrough Solution, but takes advantage of some of the more advanced statements we have learned. Feel free to download the notebook to understand how it works! 
###Code # Specifically for the iPython Notebook environment for clearing output from IPython.display import clear_output import random # Global variables theBoard = [' '] * 10 # a list of empty spaces available = [str(num) for num in range(0,10)] # a List Comprehension players = [0,'X','O'] # note that players[1] == 'X' and players[-1] == 'O' def display_board(a,b): print('Available TIC-TAC-TOE\n'+ ' moves\n\n '+ a[7]+'|'+a[8]+'|'+a[9]+' '+b[7]+'|'+b[8]+'|'+b[9]+'\n '+ '----- -----\n '+ a[4]+'|'+a[5]+'|'+a[6]+' '+b[4]+'|'+b[5]+'|'+b[6]+'\n '+ '----- -----\n '+ a[1]+'|'+a[2]+'|'+a[3]+' '+b[1]+'|'+b[2]+'|'+b[3]+'\n') display_board(available,theBoard) def display_board(a,b): print(f'Available TIC-TAC-TOE\n moves\n\n {a[7]}|{a[8]}|{a[9]} {b[7]}|{b[8]}|{b[9]}\n ----- -----\n {a[4]}|{a[5]}|{a[6]} {b[4]}|{b[5]}|{b[6]}\n ----- -----\n {a[1]}|{a[2]}|{a[3]} {b[1]}|{b[2]}|{b[3]}\n') display_board(available,theBoard) def place_marker(avail,board,marker,position): board[position] = marker avail[position] = ' ' def win_check(board,mark): return ((board[7] == board[8] == board[9] == mark) or # across the top (board[4] == board[5] == board[6] == mark) or # across the middle (board[1] == board[2] == board[3] == mark) or # across the bottom (board[7] == board[4] == board[1] == mark) or # down the middle (board[8] == board[5] == board[2] == mark) or # down the middle (board[9] == board[6] == board[3] == mark) or # down the right side (board[7] == board[5] == board[3] == mark) or # diagonal (board[9] == board[5] == board[1] == mark)) # diagonal def random_player(): return random.choice((-1, 1)) def space_check(board,position): return board[position] == ' ' def full_board_check(board): return ' ' not in board[1:] def player_choice(board,player): position = 0 while position not in [1,2,3,4,5,6,7,8,9] or not space_check(board, position): try: position = int(input('Player %s, choose your next position: (1-9) '%(player))) except: print("I'm sorry, please try again.") return position def 
replay(): return input('Do you want to play again? Enter Yes or No: ').lower().startswith('y') while True: clear_output() print('Welcome to Tic Tac Toe!') toggle = random_player() player = players[toggle] print('For this round, Player %s will go first!' %(player)) game_on = True input('Hit Enter to continue') while game_on: display_board(available,theBoard) position = player_choice(theBoard,player) place_marker(available,theBoard,player,position) if win_check(theBoard, player): display_board(available,theBoard) print('Congratulations! Player '+player+' wins!') game_on = False else: if full_board_check(theBoard): display_board(available,theBoard) print('The game is a draw!') break else: toggle *= -1 player = players[toggle] clear_output() # reset the board and available moves list theBoard = [' '] * 10 available = [str(num) for num in range(0,10)] if not replay(): break ###Output Welcome to Tic Tac Toe! For this round, Player X will go first!
temas/I.computo_cientifico/1.6.Perfilamiento_Python.ipynb
###Markdown **Notas para contenedor de docker:** Comando de docker para ejecuciรณn de la nota de forma local:nota: cambiar `` por la ruta de directorio que se desea mapear a `/datos` dentro del contenedor de docker.```docker run --rm -v :/datos --name jupyterlab_numerical -p 8888:8888 -p 8786:8786 -p 8787:8787 -d palmoreck/jupyterlab_numerical:1.1.0```password para jupyterlab: `qwerty`Detener el contenedor de docker:```docker stop jupyterlab_numerical``` Documentaciรณn de la imagen de docker `palmoreck/jupyterlab_numerical:1.1.0` en [liga](https://github.com/palmoreck/dockerfiles/tree/master/jupyterlab/numerical). --- Esta nota utiliza mรฉtodos vistos en [1.5.Integracion_numerica](https://github.com/ITAM-DS/analisis-numerico-computo-cientifico/blob/master/temas/I.computo_cientifico/1.5.Integracion_numerica.ipynb) Instalamos las herramientas que nos ayudarรกn al perfilamiento: ###Code %pip install -q --user line_profiler %pip install -q --user memory_profiler %pip install -q --user psutil %pip install -q --user guppy3 ###Output WARNING: You are using pip version 19.3.1; however, version 20.0.2 is available. You should consider upgrading via the 'pip install --upgrade pip' command. Note: you may need to restart the kernel to use updated packages. ###Markdown La siguiente celda reiniciarรก el kernel de **IPython** para cargar los paquetes instalados en la celda anterior. Dar **Ok** en el mensaje que salga y continuar con el contenido del notebook. 
###Code import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) import math from scipy.integrate import quad ###Output _____no_output_____ ###Markdown Perfilamiento en Python En esta nota revisamos algunas herramientas de Python para perfilamiento de cรณdigo: uso de cpu y memoria.Mediciรณn de tiempos con:* Mรณdulo [time](https://docs.python.org/3/library/time.htmltime.time) de Python.* [%time](https://ipython.readthedocs.io/en/stable/interactive/magics.htmlmagic-time) de comandos de magic <- esta herramienta es sรณlo para medir tiempos de un statement y sรณlo la coloco para referencia pero no se usarรก en la nota.* [/usr/bin/time](https://en.wikipedia.org/wiki/Time_(Unix)) de `Unix`.* [%timeit](https://ipython.readthedocs.io/en/stable/interactive/magics.htmlmagic-timeit) de comandos de magic.Perfilamiento:* De CPU con: [line_profiler](https://pypi.org/project/line-profiler/), [CProfile](https://docs.python.org/2/library/profile.html) que es `built-in` en la *standard-library* de Python.* De memoria con: [memory_profiler](https://pypi.org/project/memory-profiler/) y [heapy](https://pypi.org/project/guppy/). Mediciรณn de tiempos El primer acercamiento que usamos en la nota para perfilar nuestro cรณdigo es identificar quรฉ es lento, otras mediciones son la cantidad de RAM, el I/O en disco o network. 1) Uso de `time` ###Code import time ###Output _____no_output_____ ###Markdown Regla compuesta del rectรกngulo **Ejemplo de implementaciรณn de regla compuesta de rectรกngulo: usando math** Utilizar la regla compuesta del rectรกngulo para aproximar la integral $\int_0^1e^{-x^2}dx$ con $10^6$ subintervalos. ###Code f=lambda x: math.exp(-x**2) #using math library def Rcf(f,a,b,n): #Rcf: rectรกngulo compuesto para f """ Compute numerical approximation using rectangle or mid-point method in an interval. 
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf (float) """ h_hat=(b-a)/n nodes=[a+(i+1/2)*h_hat for i in range(0,n)] sum_res=0 for node in nodes: sum_res=sum_res+f(node) return h_hat*sum_res n=10**6 start_time = time.time() aprox=Rcf(f,0,1,n) end_time = time.time() secs = end_time-start_time print("Rcf tomรณ",secs,"segundos" ) ###Output Rcf tomรณ 0.3661477565765381 segundos ###Markdown **Obs:** recuรฉrdese que hay que evaluar que se estรฉ resolviendo correctamente el problema. En este caso el error relativo nos ayuda ###Code def err_relativo(aprox, obj): return math.fabs(aprox-obj)/math.fabs(obj) #obsรฉrvese el uso de la librerรญa math obj, err = quad(f, 0, 1) err_relativo(aprox,obj) ###Output _____no_output_____ ###Markdown **Comentarios:*** Tรณmese en cuenta que al medir tiempos de ejecuciรณn, siempre hay variaciรณn en la mediciรณn. Tal variaciรณn es normal.* Considรฉrese que la mรกquina en la que se estรกn corriendo las pruebas puede estar realizando otras tareas mientras se ejecuta el cรณdigo, por ejemplo acceso a la red, al disco o a la RAM. Por ello, son factores que pueden causar variaciรณn en el tiempo de ejecuciรณn del programa.* Si se van a realizar reportes de tiempos, es importante indicar las caracterรญsticas de la mรกquina en la que se estรกn haciendo las pruebas, p.ej: Dell E6420 con un procesador Intel Core I7-2720QM (2.20 GHz, 6 MB cache, Quad Core) y 8 GB de RAM en un Ubuntu $13.10$. 
import math

def Rcf(f, a, b, n):  # Rcf: composite rectangle rule for f
    """Approximate the integral of f over [a, b] with the composite
    rectangle (mid-point) rule.

    The mid-point nodes are x_i = a + (i + 1/2) * h_hat for
    i = 0, 1, ..., n-1, where h_hat = (b - a) / n.

    Args:
        f (callable): integrand, evaluated once per node.
        a (int): left end point of the interval.
        b (int): right end point of the interval.
        n (int): number of subintervals.

    Returns:
        float: numerical approximation of the integral.
    """
    h_hat = (b - a) / n
    # Build the full list of nodes up front on purpose: this is the naive
    # baseline whose cost the profiling sections of the notebook measure.
    mid_points = [a + (k + 1 / 2) * h_hat for k in range(n)]
    acc = 0
    for x in mid_points:
        acc = acc + f(x)
    return h_hat * acc

if __name__ == "__main__":  # entry point so the script can be timed from the shell
    n = 10**6
    f = lambda x: math.exp(-x**2)
    print("aproximaciรณn: {:0.6e}".format(Rcf(f, 0, 1, n)))
mรกquina.**Obs:** Una funciรณn relacionada con el kernel del sistema es el alojamiento de memoria al crear una variable. Otras son las instrucciones relacionadas con el I/O como leer de la memoria, disco o network.* La ventaja de `/usr/bin/time` es que no es especรญfico de Python.* Este comando incluye el tiempo que le toma al sistema iniciar el ejecutable de python (que puede ser significativo si se inician muchos procesos vs un sรณlo proceso). En el caso de tener short-running scripts donde el tiempo de inicio es significativo del tiempo total entonces `/usr/bin/time` puede ser una medida รบtil.**Nota:** Si se suma `user` con `sys` se tiene una idea de cuรกnto tiempo se gastรณ en la CPU y la diferencia entre este resultado y `real` da una idea de cuรกnto tiempo se gastรณ para I/O o tambiรฉn puede dar una idea de la cantidad de tiempo que se ocupรณ el sistema en correr otras tareas. * Se puede utilizar la flag `verbose` para obtener mรกs informaciรณn: ###Code %%bash /usr/bin/time --verbose python3 Rcf.py ###Output aproximaciรณn: 7.468241e-01 ###Markdown y una explicaciรณn (breve) del output se puede encontrar [aquรญ](http://manpages.ubuntu.com/manpages/xenial/man1/time.1.html). Para el caso de `Major (requiring I/O)` nos interesa que sea $0$ pues indica que el sistema operativo tiene que cargar pรกginas de datos del disco pues tales datos ya no residen en RAM (por alguna razรณn). 3) Uso de `%timeit` El mรณdulo de `timeit` es otra forma de medir el tiempo de ejecuciรณn en la CPU.**Nota:** el mรณdulo de `timeit` desabilita temporalmente el garbage collector* de Python (esto es, no habrรก desalojamiento en memoria de objetos de Python que no se utilicen). Si el garbage collector es invocado en tus operaciones para un ejemplo del mundo real, esto puede ser una razรณn de posibles diferencias que obtengas en las mediciones de tiempo. 
*sugiero buscar quรฉ es el garbage collector en blogs, por ejemplo: [liga](https://rushter.com/blog/python-garbage-collector/) o [liga2](https://stackify.com/python-garbage-collection/) o [liga3](https://stackoverflow.com/questions/4484167/python-garbage-collector-documentation). ###Code %timeit? %timeit -n 5 -r 10 Rcf(f,0,1,n) ###Output 333 ms ยฑ 11.1 ms per loop (mean ยฑ std. dev. of 10 runs, 5 loops each) ###Markdown para este caso se estรก ejecutando la funciรณn `Rcf` en un loop de tamaรฑo $5$, se estรกn promediando los tiempos de las $5$ ejecuciones y calculando su desviaciรณn estรกndar y al repetir esto $10$ veces se estรก reportando el mejor resultado. $ms$ es milisecond, $\mu s$ es microsecond y $ns$ es nanosecond. **Comentarios:*** `timeit` se recomienda usar para secciones de cรณdigo pequeรฑas. Para secciones mรกs grandes tรญpicamente modificar el valor de $n$ (ejecutar el cรณdigo n veces en un loop) resulta en mediciones distintas.* Ejecuta `timeit` varias ocasiones para asegurarse que se obtienen tiempos similares. Si observas una gran variaciรณn en las mediciones de tiempo entre distintas repeticiones de `timeit`, realiza mรกs repeticiones hasta tener un resultado estable. Mediciรณn de uso de CPU 1) Uso de cProfile `cProfile` es una herramienta **built-in** en la standard library para perfilamiento. Se utiliza con la implementaciรณn `CPython` de `Python` (ver [liga](https://stackoverflow.com/questions/17130975/python-vs-cpython) para explicaciรณn de implementaciones de Python) para medir el tiempo de ejecuciรณn de cada funciรณn en el programa. Se ejecuta desde la lรญnea de comandos o con un comando de magic. La flag `-s` indica que se ordene el resultado por el tiempo acumulado dentro de cada funciรณn. El output siguiente de `cProfile` muestra:* El tiempo total de ejecuciรณn, el cual incluye el tiempo del bloque de cรณdigo que estamos midiendo y el overhead al usar `cProfile`. 
Por esta razรณn se tiene un mayor tiempo de ejecuciรณn que con las mediciones de tiempo anteriores.* La columna `ncalls` que como el nombre indica, muestra el nรบmero de veces que se llamรณ a cada funciรณn. En este caso las funciones `lambda` y `math.exp` son las que se llaman un mayor nรบmero de veces: $n=10^6$ veces. La columna`tottime` muestra el tiempo que tardaron estas funciones en ejecutarse (sin llamar a otras funciones).* La columna `percall` es el cociente entre `tottime` y `ncalls`.* La columna `cumtime` contiene el tiempo gastado en la funciรณn y en las demรกs que llama. Por ejemplo la funciรณn `Rcf` llama a `listcomp` por lo que es natural que `Rcf` estรฉ mรกs arriba en el output ordenado de `cProfile`. Esto tambiรฉn ocurre con `lambda` y `math.exp` pues la primera llama a la segunda.* La columna de `percall` es un cociente entre la columna `cumtime` y el llamado a primitivas.* La รบltima columna indica informaciรณn de la funciรณn y la lรญnea en la que se encuentra dentro del cรณdigo. Por ejemplo la lรญnea $1$ de mรณdulo es el llamado a la funciรณn `__main__`. La lรญnea $2$ es el llamado a la funciรณn `Rcf`. Por lo que es prรกcticamente negligible el llamado a `__main__`. 
###Code %%bash python3 -m cProfile -s cumulative Rcf.py ###Output aproximaciรณn: 7.468241e-01 2000068 function calls in 0.649 seconds Ordered by: cumulative time ncalls tottime percall cumtime percall filename:lineno(function) 1 0.000 0.000 0.649 0.649 {built-in method builtins.exec} 1 0.021 0.021 0.649 0.649 Rcf.py:1(<module>) 1 0.155 0.155 0.627 0.627 Rcf.py:2(Rcf) 1000000 0.252 0.000 0.343 0.000 Rcf.py:23(<lambda>) 1 0.129 0.129 0.129 0.129 Rcf.py:16(<listcomp>) 1000000 0.090 0.000 0.090 0.000 {built-in method math.exp} 1 0.000 0.000 0.001 0.001 <frozen importlib._bootstrap>:966(_find_and_load) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec) 2 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:728(create_module) 1 0.000 0.000 0.000 0.000 {built-in method _imp.create_builtin} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:58(__init__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec) 1 0.000 0.000 0.000 0.000 {method 'format' of 'str' objects} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:151(__exit__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:103(release) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec) 1 0.000 0.000 0.000 0.000 {built-in method builtins.print} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader) 2 0.000 0.000 0.000 0.000 {built-in method _thread.allocate_lock} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs) 1 0.000 0.000 
0.000 0.000 <frozen importlib._bootstrap>:847(__exit__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:736(exec_module) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:78(acquire) 2 0.000 0.000 0.000 0.000 {built-in method _thread.get_ident} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:232(_requires_builtin_wrapper) 1 0.000 0.000 0.000 0.000 {built-in method builtins.any} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:176(cb) 4 0.000 0.000 0.000 0.000 {built-in method builtins.getattr} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:311(__enter__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:369(__init__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:222(_verbose_message) 2 0.000 0.000 0.000 0.000 {method 'get' of 'dict' objects} 2 0.000 0.000 0.000 0.000 {method 'rpartition' of 'str' objects} 1 0.000 0.000 0.000 0.000 {built-in method _imp.is_builtin} 4 0.000 0.000 0.000 0.000 {built-in method builtins.hasattr} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:307(__init__) 4 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:321(<genexpr>) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:416(parent) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:843(__enter__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:143(__init__) 3 0.000 0.000 0.000 0.000 {built-in method _imp.acquire_lock} 3 0.000 0.000 0.000 0.000 {built-in method _imp.release_lock} 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects} 1 0.000 0.000 0.000 0.000 {built-in method _imp.exec_builtin} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:424(has_location) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:753(is_package) ###Markdown **Nota:** Recordar que el output de `CProfile` con la flag `-s cumulative` estรก ordenando por el gasto en tiempo de las funciones que son llamadas en el bloque de cรณdigo analizado. No estรก ordenando por parent functions. 
Para tener un output en el que se tenga quรฉ funciones llaman a quรฉ otras se puede utilizar lo siguiente: ###Code %%bash python3 -m cProfile -o profile.stats Rcf.py import pstats p = pstats.Stats("profile.stats") p.sort_stats("cumulative") p.print_stats() p.print_callers() ###Output Ordered by: cumulative time Function was called by... ncalls tottime cumtime {built-in method builtins.exec} <- Rcf.py:1(<module>) <- 1 0.014 0.587 {built-in method builtins.exec} Rcf.py:2(Rcf) <- 1 0.131 0.573 Rcf.py:1(<module>) Rcf.py:23(<lambda>) <- 1000000 0.218 0.300 Rcf.py:2(Rcf) Rcf.py:16(<listcomp>) <- 1 0.142 0.142 Rcf.py:2(Rcf) {built-in method math.exp} <- 1000000 0.082 0.082 Rcf.py:23(<lambda>) <frozen importlib._bootstrap>:966(_find_and_load) <- 1 0.000 0.001 Rcf.py:1(<module>) <frozen importlib._bootstrap>:936(_find_and_load_unlocked) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load) <frozen importlib._bootstrap>:651(_load_unlocked) <- 1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked) <frozen importlib._bootstrap>:564(module_from_spec) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) <frozen importlib._bootstrap>:728(create_module) <- 1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec) <frozen importlib._bootstrap>:211(_call_with_frames_removed) <- 1 0.000 0.000 <frozen importlib._bootstrap>:728(create_module) 1 0.000 0.000 <frozen importlib._bootstrap>:736(exec_module) {built-in method _imp.create_builtin} <- 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed) <frozen importlib._bootstrap>:147(__enter__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load) <frozen importlib._bootstrap>:870(_find_spec) <- 1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked) <frozen importlib._bootstrap>:157(_get_module_lock) <- 1 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__) <frozen importlib._bootstrap>:707(find_spec) <- 1 0.000 0.000 <frozen 
importlib._bootstrap>:870(_find_spec) {method 'format' of 'str' objects} <- 1 0.000 0.000 Rcf.py:1(<module>) <frozen importlib._bootstrap>:433(spec_from_loader) <- 1 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec) <frozen importlib._bootstrap>:58(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock) <frozen importlib._bootstrap>:504(_init_module_attrs) <- 1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec) {built-in method builtins.print} <- 1 0.000 0.000 Rcf.py:1(<module>) <frozen importlib._bootstrap>:318(__exit__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) <frozen importlib._bootstrap>:151(__exit__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load) <frozen importlib._bootstrap>:78(acquire) <- 1 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__) <frozen importlib._bootstrap>:103(release) <- 1 0.000 0.000 <frozen importlib._bootstrap>:151(__exit__) <frozen importlib._bootstrap>:232(_requires_builtin_wrapper) <- 1 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader) {built-in method builtins.hasattr} <- 2 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader) 1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec) 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) {built-in method builtins.getattr} <- 4 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs) {built-in method builtins.any} <- 1 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__) <frozen importlib._bootstrap>:369(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader) <frozen importlib._bootstrap>:416(parent) <- 1 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs) <frozen importlib._bootstrap>:736(exec_module) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) <frozen importlib._bootstrap>:176(cb) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load) {built-in method _imp.is_builtin} 
<- 1 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec) <frozen importlib._bootstrap>:311(__enter__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) <frozen importlib._bootstrap>:143(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load) <frozen importlib._bootstrap>:222(_verbose_message) <- 1 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__) {method 'get' of 'dict' objects} <- 1 0.000 0.000 <frozen importlib._bootstrap>:176(cb) 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load) <frozen importlib._bootstrap>:307(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) <frozen importlib._bootstrap>:321(<genexpr>) <- 4 0.000 0.000 {built-in method builtins.any} <frozen importlib._bootstrap>:847(__exit__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec) {method 'rpartition' of 'str' objects} <- 1 0.000 0.000 <frozen importlib._bootstrap>:416(parent) 1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked) {built-in method _thread.allocate_lock} <- 2 0.000 0.000 <frozen importlib._bootstrap>:58(__init__) {built-in method _thread.get_ident} <- 1 0.000 0.000 <frozen importlib._bootstrap>:78(acquire) 1 0.000 0.000 <frozen importlib._bootstrap>:103(release) {built-in method _imp.acquire_lock} <- 1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock) 1 0.000 0.000 <frozen importlib._bootstrap>:176(cb) 1 0.000 0.000 <frozen importlib._bootstrap>:843(__enter__) {built-in method _imp.release_lock} <- 1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock) 1 0.000 0.000 <frozen importlib._bootstrap>:176(cb) 1 0.000 0.000 <frozen importlib._bootstrap>:847(__exit__) {built-in method _imp.exec_builtin} <- 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed) <frozen importlib._bootstrap>:843(__enter__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec) {method 'disable' of '_lsprof.Profiler' objects} <- <frozen 
importlib._bootstrap>:424(has_location) <- 1 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs) <frozen importlib._bootstrap>:753(is_package) <- 1 0.000 0.000 <frozen importlib._bootstrap>:232(_requires_builtin_wrapper) ###Markdown y podemos tambiรฉn tener la informaciรณn de a quรฉ funciones llamรณ cada funciรณn ###Code p.print_callees() ###Output Ordered by: cumulative time Function called... ncalls tottime cumtime {built-in method builtins.exec} -> 1 0.014 0.587 Rcf.py:1(<module>) Rcf.py:1(<module>) -> 1 0.000 0.001 <frozen importlib._bootstrap>:966(_find_and_load) 1 0.131 0.573 Rcf.py:2(Rcf) 1 0.000 0.000 {built-in method builtins.print} 1 0.000 0.000 {method 'format' of 'str' objects} Rcf.py:2(Rcf) -> 1 0.142 0.142 Rcf.py:16(<listcomp>) 1000000 0.218 0.300 Rcf.py:23(<lambda>) Rcf.py:23(<lambda>) -> 1000000 0.082 0.082 {built-in method math.exp} Rcf.py:16(<listcomp>) -> {built-in method math.exp} -> <frozen importlib._bootstrap>:966(_find_and_load) -> 1 0.000 0.000 <frozen importlib._bootstrap>:143(__init__) 1 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__) 1 0.000 0.000 <frozen importlib._bootstrap>:151(__exit__) 1 0.000 0.000 <frozen importlib._bootstrap>:176(cb) 1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked) 1 0.000 0.000 {method 'get' of 'dict' objects} <frozen importlib._bootstrap>:936(_find_and_load_unlocked) -> 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) 1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec) 1 0.000 0.000 {method 'rpartition' of 'str' objects} <frozen importlib._bootstrap>:651(_load_unlocked) -> 1 0.000 0.000 <frozen importlib._bootstrap>:307(__init__) 1 0.000 0.000 <frozen importlib._bootstrap>:311(__enter__) 1 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__) 1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec) 1 0.000 0.000 <frozen importlib._bootstrap>:736(exec_module) 1 0.000 0.000 {built-in method builtins.hasattr} <frozen 
importlib._bootstrap>:564(module_from_spec) -> 1 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs) 1 0.000 0.000 <frozen importlib._bootstrap>:728(create_module) 1 0.000 0.000 {built-in method builtins.hasattr} <frozen importlib._bootstrap>:728(create_module) -> 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed) <frozen importlib._bootstrap>:211(_call_with_frames_removed) -> 1 0.000 0.000 {built-in method _imp.create_builtin} 1 0.000 0.000 {built-in method _imp.exec_builtin} {built-in method _imp.create_builtin} -> <frozen importlib._bootstrap>:147(__enter__) -> 1 0.000 0.000 <frozen importlib._bootstrap>:78(acquire) 1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock) <frozen importlib._bootstrap>:870(_find_spec) -> 1 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec) 1 0.000 0.000 <frozen importlib._bootstrap>:843(__enter__) 1 0.000 0.000 <frozen importlib._bootstrap>:847(__exit__) <frozen importlib._bootstrap>:157(_get_module_lock) -> 1 0.000 0.000 <frozen importlib._bootstrap>:58(__init__) 1 0.000 0.000 {built-in method _imp.acquire_lock} 1 0.000 0.000 {built-in method _imp.release_lock} <frozen importlib._bootstrap>:707(find_spec) -> 1 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader) 1 0.000 0.000 {built-in method _imp.is_builtin} {method 'format' of 'str' objects} -> <frozen importlib._bootstrap>:433(spec_from_loader) -> 1 0.000 0.000 <frozen importlib._bootstrap>:232(_requires_builtin_wrapper) 1 0.000 0.000 <frozen importlib._bootstrap>:369(__init__) 2 0.000 0.000 {built-in method builtins.hasattr} <frozen importlib._bootstrap>:58(__init__) -> 2 0.000 0.000 {built-in method _thread.allocate_lock} <frozen importlib._bootstrap>:504(_init_module_attrs) -> 1 0.000 0.000 <frozen importlib._bootstrap>:416(parent) 1 0.000 0.000 <frozen importlib._bootstrap>:424(has_location) 4 0.000 0.000 {built-in method builtins.getattr} {built-in method builtins.print} -> <frozen 
importlib._bootstrap>:318(__exit__) -> 1 0.000 0.000 <frozen importlib._bootstrap>:222(_verbose_message) 1 0.000 0.000 {built-in method builtins.any} <frozen importlib._bootstrap>:151(__exit__) -> 1 0.000 0.000 <frozen importlib._bootstrap>:103(release) <frozen importlib._bootstrap>:78(acquire) -> 1 0.000 0.000 {built-in method _thread.get_ident} <frozen importlib._bootstrap>:103(release) -> 1 0.000 0.000 {built-in method _thread.get_ident} <frozen importlib._bootstrap>:232(_requires_builtin_wrapper) -> 1 0.000 0.000 <frozen importlib._bootstrap>:753(is_package) {built-in method builtins.hasattr} -> {built-in method builtins.getattr} -> {built-in method builtins.any} -> 4 0.000 0.000 <frozen importlib._bootstrap>:321(<genexpr>) <frozen importlib._bootstrap>:369(__init__) -> <frozen importlib._bootstrap>:416(parent) -> 1 0.000 0.000 {method 'rpartition' of 'str' objects} <frozen importlib._bootstrap>:736(exec_module) -> 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed) <frozen importlib._bootstrap>:176(cb) -> 1 0.000 0.000 {built-in method _imp.acquire_lock} 1 0.000 0.000 {built-in method _imp.release_lock} 1 0.000 0.000 {method 'get' of 'dict' objects} {built-in method _imp.is_builtin} -> <frozen importlib._bootstrap>:311(__enter__) -> <frozen importlib._bootstrap>:143(__init__) -> <frozen importlib._bootstrap>:222(_verbose_message) -> {method 'get' of 'dict' objects} -> <frozen importlib._bootstrap>:307(__init__) -> <frozen importlib._bootstrap>:321(<genexpr>) -> <frozen importlib._bootstrap>:847(__exit__) -> 1 0.000 0.000 {built-in method _imp.release_lock} {method 'rpartition' of 'str' objects} -> {built-in method _thread.allocate_lock} -> {built-in method _thread.get_ident} -> {built-in method _imp.acquire_lock} -> {built-in method _imp.release_lock} -> {built-in method _imp.exec_builtin} -> <frozen importlib._bootstrap>:843(__enter__) -> 1 0.000 0.000 {built-in method _imp.acquire_lock} {method 'disable' of '_lsprof.Profiler' objects} 
def Rcf2(f, a, b, n):
    """Compute a numerical approximation of the integral of f over [a, b]
    using the composite rectangle (mid-point) rule with a generator of nodes.

    Nodes are generated lazily via x_i = a + (i + 1/2) * h_hat for
    i = 0, 1, ..., n-1 and h_hat = (b - a) / n.  (The previous docstring
    stated the left-endpoint formula x_i = a + (b - a)/n * i, which did not
    match the code.)

    Args:
        f (callable): integrand.
        a (int): left point of interval.
        b (int): right point of interval.
        n (int): number of subintervals.

    Returns:
        float: numerical approximation Rcf2.
    """
    h_hat = (b - a) / n
    # Generator expression: nodes are produced one at a time, so no list of
    # size n is held in memory (contrast with the list comprehension in Rcf).
    nodes = (a + (i + 1 / 2) * h_hat for i in range(n))
    sum_res = 0
    for node in nodes:
        sum_res = sum_res + f(node)
    return h_hat * sum_res

def Rcf3(f, a, b, n):
    """Same composite mid-point rule as Rcf2, but the explicit accumulation
    loop is replaced by a single sum() over a generator.

    Nodes: x_i = a + (i + 1/2) * h_hat for i = 0, 1, ..., n-1 and
    h_hat = (b - a) / n.  (The previous docstring stated the left-endpoint
    formula, which did not match the code.)

    Args:
        f (callable): integrand.
        a (int): left point of interval.
        b (int): right point of interval.
        n (int): number of subintervals.

    Returns:
        float: numerical approximation Rcf3.
    """
    h_hat = (b - a) / n
    nodes = (a + (i + 1 / 2) * h_hat for i in range(n))
    # sum() consumes the generator directly; the redundant extra parentheses
    # of the original call were removed.
    suma_res = sum(f(node) for node in nodes)
    return h_hat * suma_res
def Rcf4(a, b, n):
    """Composite rectangle (mid-point) rule specialized to the integrand
    exp(-x**2).

    Hard-coding the integrand avoids one Python-level function call per
    node, at the cost of flexibility (see Rcf3 for the generic version).
    Nodes: x_i = a + (i + 1/2) * h_hat for i = 0, 1, ..., n-1 and
    h_hat = (b - a) / n.  (The previous docstring documented a non-existent
    parameter ``f`` and the left-endpoint node formula; both were wrong.)

    Args:
        a (int): left point of interval.
        b (int): right point of interval.
        n (int): number of subintervals.

    Returns:
        float: numerical approximation Rcf4.
    """
    h_hat = (b - a) / n
    nodes = (a + (i + 1 / 2) * h_hat for i in range(n))
    suma_res = sum(math.exp(-node**2) for node in nodes)
    return h_hat * suma_res

def Rcf5(a, b, n):
    """Like Rcf4, but node generation and evaluation of exp(-x**2) happen
    inside a single generator expression.

    This is the fastest variant in the notebook, though mixing node
    construction and transformation in one line makes it harder to read and
    debug, as the surrounding discussion points out.
    (The previous docstring documented a non-existent parameter ``f``, the
    left-endpoint node formula, and labeled the return value "Rcf4".)

    Args:
        a (int): left point of interval.
        b (int): right point of interval.
        n (int): number of subintervals.

    Returns:
        float: numerical approximation Rcf5.
    """
    h_hat = (b - a) / n
    f_nodes = (math.exp(-(a + (i + 1 / 2) * h_hat) ** 2) for i in range(n))
    suma_res = sum(f_nodes)
    return h_hat * suma_res
import math

def Rcf4(a, b, n):
    """Composite rectangle (mid-point) rule specialized to the integrand
    exp(-x**2).

    Nodes: x_i = a + (i + 1/2) * h_hat for i = 0, 1, ..., n-1 and
    h_hat = (b - a) / n.  (The previous docstring documented a non-existent
    parameter ``f``, the left-endpoint node formula with i up to n, and a
    mid-point rounding-error formula the code does not implement; all of
    those claims were wrong.)

    Args:
        a (int): left point of interval.
        b (int): right point of interval.
        n (int): number of subintervals.

    Returns:
        float: numerical approximation Rcf4.
    """
    h_hat = (b - a) / n
    nodes = (a + (i + 1 / 2) * h_hat for i in range(n))
    # Redundant extra parentheses around the generator were removed.
    suma_res = sum(math.exp(-node**2) for node in nodes)
    return h_hat * suma_res
###Output _____no_output_____ ###Markdown Primero medimos cuรกnto RAM estรก utilizando el proceso del notebook: ###Code %memit #how much RAM this process is consuming ###Output peak memory: 119.73 MiB, increment: 0.00 MiB ###Markdown Y podemos realizar mediciones para cada una de las implementaciones de la regla del rectรกngulo: ###Code %memit -c Rcf(f,0,1,n) %memit -c Rcf2(f,0,1,n) %memit -c Rcf3(f,0,1,10**5) %memit -c Rcf4(0,1,10**5) %memit -c Rcf5(0,1,10**5) ###Output peak memory: 228.77 MiB, increment: 103.05 MiB ###Markdown El uso de `generators` nos ayuda a disminuir la cantidad de memoria RAM usada por nuestro proceso. 2) Uso de `memory_profiler` Para mediciรณn de memoria lรญnea por lรญnea utilizamos `memory_profiler`. Se ejecuta mรกs lento que `line_profiler` (entre $10$ y $100$ veces mรกs lento!) y mejora su velocidad de ejecuciรณn al instalar el paquete `psutil`. Con lรญnea de comandos se ejecuta como sigue: ###Code %%file Rcf_memory_profiler.py import math @profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn #desea perfilarse con memory_profiler def Rcf(f,a,b,n): #Rcf: rectรกngulo compuesto para f """ Compute numerical approximation using rectangle or mid-point method in an interval. 
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf (float) """ h_hat=(b-a)/n nodes=[a+(i+1/2)*h_hat for i in range(0,n)] sum_res=0 for node in nodes: sum_res=sum_res+f(node) return h_hat*sum_res if __name__=="__main__": #añadimos este bloque para ejecución de la función Rcf n=10**6 f=lambda x: math.exp(-x**2) print("aproximación: {:0.6e}".format(Rcf(f,0,1,n))) ###Output Writing Rcf_memory_profiler.py ###Markdown En el output siguiente se observa que la línea que más incrementa la cantidad de RAM alojada para el proceso que contiene la ejecución de la función `Rcf` es la creación de la lista de nodos `nodes=[a+(i+1/2)*h_hat for i in range(0,n)]`. **Cuidado:** el valor de la columna `Increment` para esta línea no necesariamente indica que la lista `nodes` ocupa en memoria $512 MB$'s, sólo que para la alocación de la lista el proceso creció en $512 MB$'s. **Nota:** en el output aparece $MiB$ que son mebibytes. Aunque no se cumple que un mebibyte sea igual a un megabyte, se toma en este comentario como megabytes pues la diferencia entre estas unidades es sutil. ###Code %%bash python3 -m memory_profiler Rcf_memory_profiler.py ###Output aproximaciรณn: 7.468241e-01 Filename: Rcf_memory_profiler.py Line # Mem usage Increment Line Contents ================================================ 2 37.750 MiB 37.750 MiB @profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn 3 #desea perfilarse con memory_profiler 4 def Rcf(f,a,b,n): #Rcf: rectรกngulo compuesto para f 5 """ 6 Compute numerical approximation using rectangle or mid-point method in 7 an interval.
8 Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n and h_hat=(b-a)/n 9 Args: 10 f (lambda expression): lambda expression of integrand 11 a (int): left point of interval 12 b (int): right point of interval 13 n (int): number of subintervals 14 Returns: 15 Rcf (float) 16 """ 17 37.750 MiB 0.000 MiB h_hat=(b-a)/n 18 69.012 MiB 0.512 MiB nodes=[a+(i+1/2)*h_hat for i in range(0,n)] 19 69.012 MiB 0.000 MiB sum_res=0 20 69.012 MiB 0.000 MiB for node in nodes: 21 69.012 MiB 0.000 MiB sum_res=sum_res+f(node) 22 69.012 MiB 0.000 MiB return h_hat*sum_res ###Markdown Como ya se habรญa notado, los generators ahorran memoria: ###Code %%file Rcf3_memory_profiler.py import math @profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn #desea perfilarse con memory_profiler def Rcf3(f,a,b,n): """ Compute numerical approximation using rectangle or mid-point method in an interval. Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf3 (float) """ h_hat=(b-a)/n nodes=(a+(i+1/2)*h_hat for i in range(0,n)) suma_res = sum((f(node) for node in nodes)) return h_hat*suma_res if __name__=="__main__": #aรฑadimos este bloque para ejecuciรณn de la funciรณn Rcf3 n=10**6 f=lambda x: math.exp(-x**2) print("aproximaciรณn: {:0.6e}".format(Rcf3(f,0,1,n))) ###Output Writing Rcf3_memory_profiler.py ###Markdown En el output siguiente el proceso que involucra la ejecuciรณn de la funciรณn `Rcf3` no incrementa el uso de memoria RAM por el uso de generators: ###Code %%bash python3 -m memory_profiler Rcf3_memory_profiler.py ###Output aproximaciรณn: 7.468241e-01 Filename: Rcf3_memory_profiler.py Line # Mem usage Increment Line Contents ================================================ 2 37.590 MiB 37.590 MiB @profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn 3 #desea 
perfilarse con memory_profiler 4 def Rcf3(f,a,b,n): 5 """ 6 Compute numerical approximation using rectangle or mid-point method in 7 an interval. 8 Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n 9 Args: 10 f (lambda expression): lambda expression of integrand 11 a (int): left point of interval 12 b (int): right point of interval 13 n (int): number of subintervals 14 Returns: 15 Rcf3 (float) 16 """ 17 37.590 MiB 0.000 MiB h_hat=(b-a)/n 18 37.590 MiB 0.000 MiB nodes=(a+(i+1/2)*h_hat for i in range(0,n)) 19 37.590 MiB 0.000 MiB suma_res = sum((f(node) for node in nodes)) 20 37.590 MiB 0.000 MiB return h_hat*suma_res ###Markdown 3) Uso de heapy Con `heapy` podemos revisar el nรบmero y tamaรฑo de cada objeto que estรก en el heap de Python (ver [liga](https://docs.python.org/3/c-api/memory.html) y [liga2](https://stackoverflow.com/questions/14546178/does-python-have-a-stack-heap-and-how-is-memory-managed) para memory management). Tambiรฉn ayuda a encontrar **memory leaks** que ocurren si apuntamos a un objeto al que ya no deberรญamos estar apuntando... ver [liga3](https://en.wikipedia.org/wiki/Memory_leak) para saber quรฉ son las memory leaks. ###Code import math from guppy import hpy def Rcf(f,a,b,n): #Rcf: rectรกngulo compuesto para f """ Compute numerical approximation using rectangle or mid-point method in an interval. 
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n and h_hat=(b-a)/n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf (float) """ hp=hpy() h_hat=(b-a)/n h=hp.heap() print("beginning of Rcf") print(h) nodes=[a+(i+1/2)*h_hat for i in range(0,n)] h=hp.heap() print("After creating list") print(h) sum_res=0 for node in nodes: sum_res=sum_res+f(node) h=hp.heap() print("After loop") print(h) return h_hat*sum_res Rcf(f,0,1,n) import math from guppy import hpy def Rcf3(f,a,b,n): """ Compute numerical approximation using rectangle or mid-point method in an interval. Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf3 (float) """ hp=hpy() h_hat=(b-a)/n h=hp.heap() print("beginning of Rcf3") print(h) nodes=(a+(i+1/2)*h_hat for i in range(0,n)) h=hp.heap() print("After creating generator") print(h) suma_res = sum((f(node) for node in nodes)) h=hp.heap() print("After loop") print(h) return h_hat*suma_res Rcf3(f,0,1,n) ###Output beginning of Rcf3 Partition of a set of 451930 objects. Total size = 56178992 bytes. Index Count % Size % Cumulative % Kind (class / dict of class) 0 126664 28 17317847 31 17317847 31 str 1 123040 27 9506016 17 26823863 48 tuple 2 54025 12 4265717 8 31089580 55 bytes 3 27255 6 3942936 7 35032516 62 types.CodeType 4 25716 6 3497376 6 38529892 69 function 5 3155 1 3112744 6 41642636 74 type 6 6819 2 2830712 5 44473348 79 dict (no owner) 7 1244 0 1935072 3 46408420 83 dict of module 8 3155 1 1578376 3 47986796 85 dict of type 9 2286 1 846912 2 48833708 87 set <1047 more rows. Type e.g. '_.more' to view.> After creating generator Partition of a set of 451952 objects. Total size = 56180784 bytes. 
Index Count % Size % Cumulative % Kind (class / dict of class) 0 126664 28 17317847 31 17317847 31 str 1 123041 27 9506072 17 26823919 48 tuple 2 54025 12 4265717 8 31089636 55 bytes 3 27255 6 3942936 7 35032572 62 types.CodeType 4 25716 6 3497376 6 38529948 69 function 5 3155 1 3112744 6 41642692 74 type 6 6820 2 2830952 5 44473644 79 dict (no owner) 7 1244 0 1935072 3 46408716 83 dict of module 8 3155 1 1578376 3 47987092 85 dict of type 9 2286 1 846912 2 48834004 87 set <1049 more rows. Type e.g. '_.more' to view.> After loop Partition of a set of 451944 objects. Total size = 56179648 bytes. Index Count % Size % Cumulative % Kind (class / dict of class) 0 126664 28 17317847 31 17317847 31 str 1 123040 27 9506016 17 26823863 48 tuple 2 54025 12 4265717 8 31089580 55 bytes 3 27255 6 3942936 7 35032516 62 types.CodeType 4 25716 6 3497376 6 38529892 69 function 5 3155 1 3112744 6 41642636 74 type 6 6819 2 2830712 5 44473348 79 dict (no owner) 7 1244 0 1935072 3 46408420 83 dict of module 8 3155 1 1578376 3 47986796 85 dict of type 9 2286 1 846912 2 48833708 87 set <1047 more rows. Type e.g. '_.more' to view.> ###Markdown **Notas para contenedor de docker:** Comando de docker para ejecuciรณn de la nota de forma local:nota: cambiar `` por la ruta de directorio que se desea mapear a `/datos` dentro del contenedor de docker.```docker run --rm -v :/datos --name jupyterlab_numerical -p 8888:8888 -d palmoreck/jupyterlab_numerical:1.1.0```password para jupyterlab: `qwerty`Detener el contenedor de docker:```docker stop jupyterlab_numerical``` Documentaciรณn de la imagen de docker `palmoreck/jupyterlab_numerical:1.1.0` en [liga](https://github.com/palmoreck/dockerfiles/tree/master/jupyterlab/numerical). 
--- Esta nota utiliza mรฉtodos vistos en [1.5.Integracion_numerica](https://github.com/ITAM-DS/analisis-numerico-computo-cientifico/blob/master/temas/I.computo_cientifico/1.5.Integracion_numerica.ipynb) Instalamos las herramientas que nos ayudarรกn al perfilamiento: ###Code %pip install -q --user line_profiler %pip install -q --user memory_profiler %pip install -q --user psutil %pip install -q --user guppy3 ###Output WARNING: You are using pip version 19.3.1; however, version 20.0.2 is available. You should consider upgrading via the 'pip install --upgrade pip' command. Note: you may need to restart the kernel to use updated packages. ###Markdown La siguiente celda reiniciarรก el kernel de **IPython** para cargar los paquetes instalados en la celda anterior. Dar **Ok** en el mensaje que salga y continuar con el contenido del notebook. ###Code import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) import math from scipy.integrate import quad ###Output _____no_output_____ ###Markdown Perfilamiento en Python En esta nota revisamos algunas herramientas de Python para perfilamiento de cรณdigo: uso de cpu y memoria.Mediciรณn de tiempos con:* Mรณdulo [time](https://docs.python.org/3/library/time.htmltime.time) de Python.* [%time](https://ipython.readthedocs.io/en/stable/interactive/magics.htmlmagic-time) de comandos de magic <- esta herramienta es sรณlo para medir tiempos de un statement y sรณlo la coloco para referencia pero no se usarรก en la nota.* [/usr/bin/time](https://en.wikipedia.org/wiki/Time_(Unix)) de `Unix`.* [%timeit](https://ipython.readthedocs.io/en/stable/interactive/magics.htmlmagic-timeit) de comandos de magic.Perfilamiento:* De CPU con: [line_profiler](https://pypi.org/project/line-profiler/), [CProfile](https://docs.python.org/2/library/profile.html) que es `built-in` en la *standard-library* de Python.* De memoria con: [memory_profiler](https://pypi.org/project/memory-profiler/) y [heapy](https://pypi.org/project/guppy/). 
Mediciรณn de tiempos El primer acercamiento que usamos en la nota para perfilar nuestro cรณdigo es identificar quรฉ es lento, otras mediciones son la cantidad de RAM, el I/O en disco o network. 1) Uso de `time` ###Code import time ###Output _____no_output_____ ###Markdown Regla compuesta del rectรกngulo **Ejemplo de implementaciรณn de regla compuesta de rectรกngulo: usando math** Utilizar la regla compuesta del rectรกngulo para aproximar la integral $\int_0^1e^{-x^2}dx$ con $10^6$ subintervalos. ###Code f=lambda x: math.exp(-x**2) #using math library def Rcf(f,a,b,n): #Rcf: rectรกngulo compuesto para f """ Compute numerical approximation using rectangle or mid-point method in an interval. Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf (float) """ h_hat=(b-a)/n nodes=[a+(i+1/2)*h_hat for i in range(0,n)] sum_res=0 for node in nodes: sum_res=sum_res+f(node) return h_hat*sum_res n=10**6 start_time = time.time() aprox=Rcf(f,0,1,n) end_time = time.time() secs = end_time-start_time print("Rcf tomรณ",secs,"segundos" ) ###Output Rcf tomรณ 0.3661477565765381 segundos ###Markdown **Obs:** recuรฉrdese que hay que evaluar que se estรฉ resolviendo correctamente el problema. En este caso el error relativo nos ayuda ###Code def err_relativo(aprox, obj): return math.fabs(aprox-obj)/math.fabs(obj) #obsรฉrvese el uso de la librerรญa math obj, err = quad(f, 0, 1) err_relativo(aprox,obj) ###Output _____no_output_____ ###Markdown **Comentarios:*** Tรณmese en cuenta que al medir tiempos de ejecuciรณn, siempre hay variaciรณn en la mediciรณn. Tal variaciรณn es normal.* Considรฉrese que la mรกquina en la que se estรกn corriendo las pruebas puede estar realizando otras tareas mientras se ejecuta el cรณdigo, por ejemplo acceso a la red, al disco o a la RAM. 
Por ello, son factores que pueden causar variaciรณn en el tiempo de ejecuciรณn del programa.* Si se van a realizar reportes de tiempos, es importante indicar las caracterรญsticas de la mรกquina en la que se estรกn haciendo las pruebas, p.ej: Dell E6420 con un procesador Intel Core I7-2720QM (2.20 GHz, 6 MB cache, Quad Core) y 8 GB de RAM en un Ubuntu $13.10$. 2) Uso de `/usr/bin/time` de Unix Para la lรญnea de comando `/usr/bin/time` primero escribimos el siguiente archivo en la ruta donde se encuentra este notebook con la lรญnea de comando magic `%file` ###Code %%file Rcf.py import math def Rcf(f,a,b,n): #Rcf: rectรกngulo compuesto para f """ Compute numerical approximation using rectangle or mid-point method in an interval. Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf (float) """ h_hat=(b-a)/n nodes=[a+(i+1/2)*h_hat for i in range(0,n)] sum_res=0 for node in nodes: sum_res=sum_res+f(node) return h_hat*sum_res if __name__=="__main__": #aรฑadimos este bloque para ejecuciรณn de la funciรณn Rcf n=10**6 f=lambda x: math.exp(-x**2) print("aproximaciรณn: {:0.6e}".format(Rcf(f,0,1,n))) ###Output Writing Rcf.py ###Markdown Ver [liga](https://stackoverflow.com/questions/419163/what-does-if-name-main-do) y [liga2](https://es.stackoverflow.com/questions/32165/qu%C3%A9-es-if-name-main) para saber quรฉ es lo que hace la lรญnea `if __name__ == "__main__":` Lo siguiente es necesario si no tienen instalado el comando `/usr/bin/time`: ###Code %%bash sudo apt-get install time %%bash /usr/bin/time -p python3 Rcf.py #la p es de portabilidad, #ver: http://manpages.ubuntu.com/manpages/xenial/man1/time.1.html #para mayor informaciรณn ###Output aproximaciรณn: 7.468241e-01 ###Markdown **Comentarios:*** `real` que mide el wall clock o elapsed time.* `user` que mide la cantidad 
de tiempo de tu ejecuciรณn que la CPU gastรณ para funciones que no estรกn relacionadas con el kernel* del sistema.* `sys` que mide la cantidad de tiempo de tu ejecuciรณn que la CPU gastรณ en funciones a nivel de kernel del sistema.\*Ver [kernel operating system](https://en.wikipedia.org/wiki/Kernel_(operating_system)) para definiciรณn del kernel de una mรกquina.**Obs:** Una funciรณn relacionada con el kernel del sistema es el alojamiento de memoria al crear una variable. Otras son las instrucciones relacionadas con el I/O como leer de la memoria, disco o network.* La ventaja de `/usr/bin/time` es que no es especรญfico de Python.* Este comando incluye el tiempo que le toma al sistema iniciar el ejecutable de python (que puede ser significativo si se inician muchos procesos vs un sรณlo proceso). En el caso de tener short-running scripts donde el tiempo de inicio es significativo del tiempo total entonces `/usr/bin/time` puede ser una medida รบtil.**Nota:** Si se suma `user` con `sys` se tiene una idea de cuรกnto tiempo se gastรณ en la CPU y la diferencia entre este resultado y `real` da una idea de cuรกnto tiempo se gastรณ para I/O o tambiรฉn puede dar una idea de la cantidad de tiempo que se ocupรณ el sistema en correr otras tareas. * Se puede utilizar la flag `verbose` para obtener mรกs informaciรณn: ###Code %%bash /usr/bin/time --verbose python3 Rcf.py ###Output aproximaciรณn: 7.468241e-01 ###Markdown y una explicaciรณn (breve) del output se puede encontrar [aquรญ](http://manpages.ubuntu.com/manpages/xenial/man1/time.1.html). Para el caso de `Major (requiring I/O)` nos interesa que sea $0$ pues indica que el sistema operativo tiene que cargar pรกginas de datos del disco pues tales datos ya no residen en RAM (por alguna razรณn). 
3) Uso de `%timeit` El mรณdulo de `timeit` es otra forma de medir el tiempo de ejecuciรณn en la CPU.**Nota:** el mรณdulo de `timeit` desabilita temporalmente el garbage collector* de Python (esto es, no habrรก desalojamiento en memoria de objetos de Python que no se utilicen). Si el garbage collector es invocado en tus operaciones para un ejemplo del mundo real, esto puede ser una razรณn de posibles diferencias que obtengas en las mediciones de tiempo. *sugiero buscar quรฉ es el garbage collector en blogs, por ejemplo: [liga](https://rushter.com/blog/python-garbage-collector/) o [liga2](https://stackify.com/python-garbage-collection/) o [liga3](https://stackoverflow.com/questions/4484167/python-garbage-collector-documentation). ###Code %timeit? %timeit -n 5 -r 10 Rcf(f,0,1,n) ###Output 333 ms ยฑ 11.1 ms per loop (mean ยฑ std. dev. of 10 runs, 5 loops each) ###Markdown para este caso se estรก ejecutando la funciรณn `Rcf` en un loop de tamaรฑo $5$, se estรกn promediando los tiempos de las $5$ ejecuciones y calculando su desviaciรณn estรกndar y al repetir esto $10$ veces se estรก reportando el mejor resultado. $ms$ es milisecond, $\mu s$ es microsecond y $ns$ es nanosecond. **Comentarios:*** `timeit` se recomienda usar para secciones de cรณdigo pequeรฑas. Para secciones mรกs grandes tรญpicamente modificar el valor de $n$ (ejecutar el cรณdigo n veces en un loop) resulta en mediciones distintas.* Ejecuta `timeit` varias ocasiones para asegurarse que se obtienen tiempos similares. Si observas una gran variaciรณn en las mediciones de tiempo entre distintas repeticiones de `timeit`, realiza mรกs repeticiones hasta tener un resultado estable. Mediciรณn de uso de CPU 1) Uso de cProfile `cProfile` es una herramienta **built-in** en la standard library para perfilamiento. 
Se utiliza con la implementaciรณn `CPython` de `Python` (ver [liga](https://stackoverflow.com/questions/17130975/python-vs-cpython) para explicaciรณn de implementaciones de Python) para medir el tiempo de ejecuciรณn de cada funciรณn en el programa. Se ejecuta desde la lรญnea de comandos o con un comando de magic. La flag `-s` indica que se ordene el resultado por el tiempo acumulado dentro de cada funciรณn. El output siguiente de `cProfile` muestra:* El tiempo total de ejecuciรณn, el cual incluye el tiempo del bloque de cรณdigo que estamos midiendo y el overhead al usar `cProfile`. Por esta razรณn se tiene un mayor tiempo de ejecuciรณn que con las mediciones de tiempo anteriores.* La columna `ncalls` que como el nombre indica, muestra el nรบmero de veces que se llamรณ a cada funciรณn. En este caso las funciones `lambda` y `math.exp` son las que se llaman un mayor nรบmero de veces: $n=10^6$ veces. La columna`tottime` muestra el tiempo que tardaron estas funciones en ejecutarse (sin llamar a otras funciones).* La columna `percall` es el cociente entre `tottime` y `ncalls`.* La columna `cumtime` contiene el tiempo gastado en la funciรณn y en las demรกs que llama. Por ejemplo la funciรณn `Rcf` llama a `listcomp` por lo que es natural que `Rcf` estรฉ mรกs arriba en el output ordenado de `cProfile`. Esto tambiรฉn ocurre con `lambda` y `math.exp` pues la primera llama a la segunda.* La columna de `percall` es un cociente entre la columna `cumtime` y el llamado a primitivas.* La รบltima columna indica informaciรณn de la funciรณn y la lรญnea en la que se encuentra dentro del cรณdigo. Por ejemplo la lรญnea $1$ de mรณdulo es el llamado a la funciรณn `__main__`. La lรญnea $2$ es el llamado a la funciรณn `Rcf`. Por lo que es prรกcticamente negligible el llamado a `__main__`. 
###Code %%bash python3 -m cProfile -s cumulative Rcf.py ###Output aproximaciรณn: 7.468241e-01 2000068 function calls in 0.649 seconds Ordered by: cumulative time ncalls tottime percall cumtime percall filename:lineno(function) 1 0.000 0.000 0.649 0.649 {built-in method builtins.exec} 1 0.021 0.021 0.649 0.649 Rcf.py:1(<module>) 1 0.155 0.155 0.627 0.627 Rcf.py:2(Rcf) 1000000 0.252 0.000 0.343 0.000 Rcf.py:23(<lambda>) 1 0.129 0.129 0.129 0.129 Rcf.py:16(<listcomp>) 1000000 0.090 0.000 0.090 0.000 {built-in method math.exp} 1 0.000 0.000 0.001 0.001 <frozen importlib._bootstrap>:966(_find_and_load) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec) 2 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:728(create_module) 1 0.000 0.000 0.000 0.000 {built-in method _imp.create_builtin} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:58(__init__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec) 1 0.000 0.000 0.000 0.000 {method 'format' of 'str' objects} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:151(__exit__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:103(release) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec) 1 0.000 0.000 0.000 0.000 {built-in method builtins.print} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader) 2 0.000 0.000 0.000 0.000 {built-in method _thread.allocate_lock} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs) 1 0.000 0.000 
0.000 0.000 <frozen importlib._bootstrap>:847(__exit__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:736(exec_module) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:78(acquire) 2 0.000 0.000 0.000 0.000 {built-in method _thread.get_ident} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:232(_requires_builtin_wrapper) 1 0.000 0.000 0.000 0.000 {built-in method builtins.any} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:176(cb) 4 0.000 0.000 0.000 0.000 {built-in method builtins.getattr} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:311(__enter__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:369(__init__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:222(_verbose_message) 2 0.000 0.000 0.000 0.000 {method 'get' of 'dict' objects} 2 0.000 0.000 0.000 0.000 {method 'rpartition' of 'str' objects} 1 0.000 0.000 0.000 0.000 {built-in method _imp.is_builtin} 4 0.000 0.000 0.000 0.000 {built-in method builtins.hasattr} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:307(__init__) 4 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:321(<genexpr>) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:416(parent) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:843(__enter__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:143(__init__) 3 0.000 0.000 0.000 0.000 {built-in method _imp.acquire_lock} 3 0.000 0.000 0.000 0.000 {built-in method _imp.release_lock} 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects} 1 0.000 0.000 0.000 0.000 {built-in method _imp.exec_builtin} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:424(has_location) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:753(is_package) ###Markdown **Nota:** Recordar que el output de `CProfile` con la flag `-s cumulative` estรก ordenando por el gasto en tiempo de las funciones que son llamadas en el bloque de cรณdigo analizado. No estรก ordenando por parent functions. 
Para tener un output en el que se tenga quรฉ funciones llaman a quรฉ otras se puede utilizar lo siguiente: ###Code %%bash python3 -m cProfile -o profile.stats Rcf.py import pstats p = pstats.Stats("profile.stats") p.sort_stats("cumulative") p.print_stats() p.print_callers() ###Output Ordered by: cumulative time Function was called by... ncalls tottime cumtime {built-in method builtins.exec} <- Rcf.py:1(<module>) <- 1 0.014 0.587 {built-in method builtins.exec} Rcf.py:2(Rcf) <- 1 0.131 0.573 Rcf.py:1(<module>) Rcf.py:23(<lambda>) <- 1000000 0.218 0.300 Rcf.py:2(Rcf) Rcf.py:16(<listcomp>) <- 1 0.142 0.142 Rcf.py:2(Rcf) {built-in method math.exp} <- 1000000 0.082 0.082 Rcf.py:23(<lambda>) <frozen importlib._bootstrap>:966(_find_and_load) <- 1 0.000 0.001 Rcf.py:1(<module>) <frozen importlib._bootstrap>:936(_find_and_load_unlocked) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load) <frozen importlib._bootstrap>:651(_load_unlocked) <- 1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked) <frozen importlib._bootstrap>:564(module_from_spec) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) <frozen importlib._bootstrap>:728(create_module) <- 1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec) <frozen importlib._bootstrap>:211(_call_with_frames_removed) <- 1 0.000 0.000 <frozen importlib._bootstrap>:728(create_module) 1 0.000 0.000 <frozen importlib._bootstrap>:736(exec_module) {built-in method _imp.create_builtin} <- 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed) <frozen importlib._bootstrap>:147(__enter__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load) <frozen importlib._bootstrap>:870(_find_spec) <- 1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked) <frozen importlib._bootstrap>:157(_get_module_lock) <- 1 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__) <frozen importlib._bootstrap>:707(find_spec) <- 1 0.000 0.000 <frozen 
importlib._bootstrap>:870(_find_spec) {method 'format' of 'str' objects} <- 1 0.000 0.000 Rcf.py:1(<module>) <frozen importlib._bootstrap>:433(spec_from_loader) <- 1 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec) <frozen importlib._bootstrap>:58(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock) <frozen importlib._bootstrap>:504(_init_module_attrs) <- 1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec) {built-in method builtins.print} <- 1 0.000 0.000 Rcf.py:1(<module>) <frozen importlib._bootstrap>:318(__exit__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) <frozen importlib._bootstrap>:151(__exit__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load) <frozen importlib._bootstrap>:78(acquire) <- 1 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__) <frozen importlib._bootstrap>:103(release) <- 1 0.000 0.000 <frozen importlib._bootstrap>:151(__exit__) <frozen importlib._bootstrap>:232(_requires_builtin_wrapper) <- 1 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader) {built-in method builtins.hasattr} <- 2 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader) 1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec) 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) {built-in method builtins.getattr} <- 4 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs) {built-in method builtins.any} <- 1 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__) <frozen importlib._bootstrap>:369(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader) <frozen importlib._bootstrap>:416(parent) <- 1 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs) <frozen importlib._bootstrap>:736(exec_module) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) <frozen importlib._bootstrap>:176(cb) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load) {built-in method _imp.is_builtin} 
<- 1 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec) <frozen importlib._bootstrap>:311(__enter__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) <frozen importlib._bootstrap>:143(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load) <frozen importlib._bootstrap>:222(_verbose_message) <- 1 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__) {method 'get' of 'dict' objects} <- 1 0.000 0.000 <frozen importlib._bootstrap>:176(cb) 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load) <frozen importlib._bootstrap>:307(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) <frozen importlib._bootstrap>:321(<genexpr>) <- 4 0.000 0.000 {built-in method builtins.any} <frozen importlib._bootstrap>:847(__exit__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec) {method 'rpartition' of 'str' objects} <- 1 0.000 0.000 <frozen importlib._bootstrap>:416(parent) 1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked) {built-in method _thread.allocate_lock} <- 2 0.000 0.000 <frozen importlib._bootstrap>:58(__init__) {built-in method _thread.get_ident} <- 1 0.000 0.000 <frozen importlib._bootstrap>:78(acquire) 1 0.000 0.000 <frozen importlib._bootstrap>:103(release) {built-in method _imp.acquire_lock} <- 1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock) 1 0.000 0.000 <frozen importlib._bootstrap>:176(cb) 1 0.000 0.000 <frozen importlib._bootstrap>:843(__enter__) {built-in method _imp.release_lock} <- 1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock) 1 0.000 0.000 <frozen importlib._bootstrap>:176(cb) 1 0.000 0.000 <frozen importlib._bootstrap>:847(__exit__) {built-in method _imp.exec_builtin} <- 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed) <frozen importlib._bootstrap>:843(__enter__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec) {method 'disable' of '_lsprof.Profiler' objects} <- <frozen 
importlib._bootstrap>:424(has_location) <- 1 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs) <frozen importlib._bootstrap>:753(is_package) <- 1 0.000 0.000 <frozen importlib._bootstrap>:232(_requires_builtin_wrapper) ###Markdown y podemos tambiรฉn tener la informaciรณn de a quรฉ funciones llamรณ cada funciรณn ###Code p.print_callees() ###Output Ordered by: cumulative time Function called... ncalls tottime cumtime {built-in method builtins.exec} -> 1 0.014 0.587 Rcf.py:1(<module>) Rcf.py:1(<module>) -> 1 0.000 0.001 <frozen importlib._bootstrap>:966(_find_and_load) 1 0.131 0.573 Rcf.py:2(Rcf) 1 0.000 0.000 {built-in method builtins.print} 1 0.000 0.000 {method 'format' of 'str' objects} Rcf.py:2(Rcf) -> 1 0.142 0.142 Rcf.py:16(<listcomp>) 1000000 0.218 0.300 Rcf.py:23(<lambda>) Rcf.py:23(<lambda>) -> 1000000 0.082 0.082 {built-in method math.exp} Rcf.py:16(<listcomp>) -> {built-in method math.exp} -> <frozen importlib._bootstrap>:966(_find_and_load) -> 1 0.000 0.000 <frozen importlib._bootstrap>:143(__init__) 1 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__) 1 0.000 0.000 <frozen importlib._bootstrap>:151(__exit__) 1 0.000 0.000 <frozen importlib._bootstrap>:176(cb) 1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked) 1 0.000 0.000 {method 'get' of 'dict' objects} <frozen importlib._bootstrap>:936(_find_and_load_unlocked) -> 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) 1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec) 1 0.000 0.000 {method 'rpartition' of 'str' objects} <frozen importlib._bootstrap>:651(_load_unlocked) -> 1 0.000 0.000 <frozen importlib._bootstrap>:307(__init__) 1 0.000 0.000 <frozen importlib._bootstrap>:311(__enter__) 1 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__) 1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec) 1 0.000 0.000 <frozen importlib._bootstrap>:736(exec_module) 1 0.000 0.000 {built-in method builtins.hasattr} <frozen 
importlib._bootstrap>:564(module_from_spec) -> 1 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs) 1 0.000 0.000 <frozen importlib._bootstrap>:728(create_module) 1 0.000 0.000 {built-in method builtins.hasattr} <frozen importlib._bootstrap>:728(create_module) -> 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed) <frozen importlib._bootstrap>:211(_call_with_frames_removed) -> 1 0.000 0.000 {built-in method _imp.create_builtin} 1 0.000 0.000 {built-in method _imp.exec_builtin} {built-in method _imp.create_builtin} -> <frozen importlib._bootstrap>:147(__enter__) -> 1 0.000 0.000 <frozen importlib._bootstrap>:78(acquire) 1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock) <frozen importlib._bootstrap>:870(_find_spec) -> 1 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec) 1 0.000 0.000 <frozen importlib._bootstrap>:843(__enter__) 1 0.000 0.000 <frozen importlib._bootstrap>:847(__exit__) <frozen importlib._bootstrap>:157(_get_module_lock) -> 1 0.000 0.000 <frozen importlib._bootstrap>:58(__init__) 1 0.000 0.000 {built-in method _imp.acquire_lock} 1 0.000 0.000 {built-in method _imp.release_lock} <frozen importlib._bootstrap>:707(find_spec) -> 1 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader) 1 0.000 0.000 {built-in method _imp.is_builtin} {method 'format' of 'str' objects} -> <frozen importlib._bootstrap>:433(spec_from_loader) -> 1 0.000 0.000 <frozen importlib._bootstrap>:232(_requires_builtin_wrapper) 1 0.000 0.000 <frozen importlib._bootstrap>:369(__init__) 2 0.000 0.000 {built-in method builtins.hasattr} <frozen importlib._bootstrap>:58(__init__) -> 2 0.000 0.000 {built-in method _thread.allocate_lock} <frozen importlib._bootstrap>:504(_init_module_attrs) -> 1 0.000 0.000 <frozen importlib._bootstrap>:416(parent) 1 0.000 0.000 <frozen importlib._bootstrap>:424(has_location) 4 0.000 0.000 {built-in method builtins.getattr} {built-in method builtins.print} -> <frozen 
importlib._bootstrap>:318(__exit__) -> 1 0.000 0.000 <frozen importlib._bootstrap>:222(_verbose_message) 1 0.000 0.000 {built-in method builtins.any} <frozen importlib._bootstrap>:151(__exit__) -> 1 0.000 0.000 <frozen importlib._bootstrap>:103(release) <frozen importlib._bootstrap>:78(acquire) -> 1 0.000 0.000 {built-in method _thread.get_ident} <frozen importlib._bootstrap>:103(release) -> 1 0.000 0.000 {built-in method _thread.get_ident} <frozen importlib._bootstrap>:232(_requires_builtin_wrapper) -> 1 0.000 0.000 <frozen importlib._bootstrap>:753(is_package) {built-in method builtins.hasattr} -> {built-in method builtins.getattr} -> {built-in method builtins.any} -> 4 0.000 0.000 <frozen importlib._bootstrap>:321(<genexpr>) <frozen importlib._bootstrap>:369(__init__) -> <frozen importlib._bootstrap>:416(parent) -> 1 0.000 0.000 {method 'rpartition' of 'str' objects} <frozen importlib._bootstrap>:736(exec_module) -> 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed) <frozen importlib._bootstrap>:176(cb) -> 1 0.000 0.000 {built-in method _imp.acquire_lock} 1 0.000 0.000 {built-in method _imp.release_lock} 1 0.000 0.000 {method 'get' of 'dict' objects} {built-in method _imp.is_builtin} -> <frozen importlib._bootstrap>:311(__enter__) -> <frozen importlib._bootstrap>:143(__init__) -> <frozen importlib._bootstrap>:222(_verbose_message) -> {method 'get' of 'dict' objects} -> <frozen importlib._bootstrap>:307(__init__) -> <frozen importlib._bootstrap>:321(<genexpr>) -> <frozen importlib._bootstrap>:847(__exit__) -> 1 0.000 0.000 {built-in method _imp.release_lock} {method 'rpartition' of 'str' objects} -> {built-in method _thread.allocate_lock} -> {built-in method _thread.get_ident} -> {built-in method _imp.acquire_lock} -> {built-in method _imp.release_lock} -> {built-in method _imp.exec_builtin} -> <frozen importlib._bootstrap>:843(__enter__) -> 1 0.000 0.000 {built-in method _imp.acquire_lock} {method 'disable' of '_lsprof.Profiler' objects} 
-> <frozen importlib._bootstrap>:424(has_location) -> <frozen importlib._bootstrap>:753(is_package) -> ###Markdown El comando de magic es `%prun`: ###Code %prun -s cumulative Rcf(f,0,1,n) ###Output ###Markdown 2) Uso de line_profiler `line_profiler` trabaja perfilando el cรณdigo de forma individual funciones lรญnea por lรญnea. La idea serรญa perfilar primero con `CProfile` al programa para identificar aquellas funciones que gastan un mayor tiempo de ejecuciรณn y posteriormente perfilarlas con `line_profiler`. **Comentario:** una buena prรกctica es guardar las diferentes versiones de tu cรณdigo cuando vas modificรกndolo para tener un registro de tus cambios. Puede ejecutarse desde la lรญnea de comandos o cargarse en IPython con el comando magic `load_ext`: ###Code %load_ext line_profiler %lprun? ###Output _____no_output_____ ###Markdown En el siguiente output:* La columna `%Time` contiene el porcentaje de tiempo gastado. En el caso que se perfila, la lรญnea`sum_res=sum_res+f(node)` es en la que mรกs porcentaje del tiempo se gasta. Seguida de la lรญnea del `for` y de la lรญnea donde se hace uso de [list comprehension](https://docs.python.org/3/tutorial/datastructures.htmllist-comprehensions) para crear a los nodos de integraciรณn numรฉrica. ###Code %lprun -f Rcf Rcf(f,0,1,n) ###Output _____no_output_____ ###Markdown Con la evidencia generada con `line_profiler` ยฟpodrรญamos escribir una funciรณn que fuera mรกs rรกpida? Lo primero que podemos hacer es utilizar un [generator](https://wiki.python.org/moin/Generators) en lugar de una lista: ###Code def Rcf2(f,a,b,n): """ Compute numerical approximation using rectangle or mid-point method in an interval. 
Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n-1 Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf2 (float) """ h_hat=(b-a)/n nodes=(a+(i+1/2)*h_hat for i in range(0,n)) sum_res=0 for node in nodes: sum_res=sum_res+f(node) return h_hat*sum_res ###Output _____no_output_____ ###Markdown medir con `%timeit`: ###Code %timeit -n 5 -r 10 Rcf2(f,0,1,n) aprox=Rcf2(f,0,1,n) ###Output _____no_output_____ ###Markdown revisar que estรก correcta esta nueva implementaciรณn: ###Code err_relativo(aprox,obj) ###Output _____no_output_____ ###Markdown perfilarla con `line_profiler`: ###Code %lprun -f Rcf2 Rcf2(f,0,1,n) ###Output _____no_output_____ ###Markdown y observar que la lรญnea en la que se creaba la lista ahora es despreciable el porcentaje de tiempo que se gasta en ella. Podemos hacer una implementaciรณn que se encargue del gasto del tiempo en la lรญnea del `for`: ###Code def Rcf3(f,a,b,n): """ Compute numerical approximation using rectangle or mid-point method in an interval. Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n-1 Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf3 (float) """ h_hat=(b-a)/n nodes=(a+(i+1/2)*h_hat for i in range(0,n)) suma_res = sum((f(node) for node in nodes)) return h_hat*suma_res ###Output _____no_output_____ ###Markdown medir con `%timeit`: ###Code %timeit -n 5 -r 10 Rcf3(f,0,1,n) ###Output 363 ms ยฑ 4.99 ms per loop (mean ยฑ std. dev. 
of 10 runs, 5 loops each) ###Markdown revisar que estรก correcta esta nueva implementaciรณn: ###Code aprox=Rcf3(f,0,1,n) err_relativo(aprox,obj) ###Output _____no_output_____ ###Markdown perfilarla con `line_profiler`: ###Code %lprun -f Rcf3 Rcf3(f,0,1,n) ###Output _____no_output_____ ###Markdown y se tiene la mayorรญa del porcentaje de tiempo ahora en una sola lรญnea. Recuรฉrdese que el resultado de `Cprofile` indicรณ que se llama a la funciรณn `lambda` y `math.exp` $n=10^6$ veces. Una implementaciรณn de la regla del rectรกngulo con menor nรบmero de llamadas a funciones (y por tanto menor tiempo) serรญa: ###Code def Rcf4(a,b,n): """ Compute numerical approximation using rectangle or mid-point method in an interval. Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n-1 Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf4 (float) """ h_hat=(b-a)/n nodes=(a+(i+1/2)*h_hat for i in range(0,n)) suma_res = sum(((math.exp(-node**2) for node in nodes))) return h_hat*suma_res %lprun -f Rcf4 Rcf4(0,1,n) %timeit -n 5 -r 10 Rcf4(0,1,n) aprox=Rcf4(0,1,n) err_relativo(aprox,obj) ###Output _____no_output_____ ###Markdown Si bien esta implementaciรณn es la mรกs rรกpida hasta este punto no es tan flexible pues estรก calculando la regla del rectรกngulo para una funciรณn definida dentro de la misma funciรณn. Si quisiรฉramos calcular la regla para otra funciรณn se tendrรญa que directamente modificar la funciรณn `Rcf` lo cual no es flexible. Aunque `Rcf4` es mรกs rรกpida preferimos `Rcf3` por su flexibilidad y menor uso de recursos (que se verรก con el `memory_profiler` mรกs adelante). ###Code def Rcf5(a,b,n): """ Compute numerical approximation using rectangle or mid-point method in an interval. 
Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n-1 Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf4 (float) """ h_hat=(b-a)/n f_nodes=(math.exp(-(a+(i+1/2)*h_hat)**2) for i in range(0,n)) suma_res = sum(f_nodes) return h_hat*suma_res %lprun -f Rcf5 Rcf5(0,1,n) %timeit -n 5 -r 10 Rcf5(0,1,n) aprox=Rcf5(0,1,n) err_relativo(aprox,obj) ###Output _____no_output_____ ###Markdown Obsรฉrvese que en una lรญnea se estรกn construyendo nodos y transformando con `math.exp` en `Rcf5`. Aunque esta implementaciรณn es la mรกs rรกpida hasta ahora, no se sugiere usarla pues le falta flexibilidad como `Rcf4` y no es recomendable en una lรญnea construir datos y transformarlos. Combinar operaciones en una sola lรญnea resulta en cรณdigo difรญcil de leer. Es mejor separar en dos funciones estas dos tareas por si falla una sepamos cuรกl fallรณ y por quรฉ fallรณ. **Ejemplo de ejecuciรณn de line_profiler desde la lรญnea de comandos:** ###Code %%file Rcf4.py import math @profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn #desea perfilarse con line_profiler def Rcf4(a,b,n): """ Compute numerical approximation using rectangle or mid-point method in an interval. 
Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n Mid point is calculated via formula: x_{i-1}+(x_i-x_{i-1})/2 for i=1,...,n-1 to avoid rounding errors Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf4 (float) """ h_hat=(b-a)/n nodes=(a+(i+1/2)*h_hat for i in range(0,n)) suma_res = sum(((math.exp(-node**2) for node in nodes))) return h_hat*suma_res if __name__ == "__main__": n=10**6 print("aproximaciรณn: {:0.6e}".format(Rcf4(0,1,n))) %%bash $HOME/.local/bin/kernprof -l -v Rcf4.py ###Output aproximaciรณn: 7.468241e-01 Wrote profile results to Rcf4.py.lprof Timer unit: 1e-06 s Total time: 0.77406 s File: Rcf4.py Function: Rcf4 at line 2 Line # Hits Time Per Hit % Time Line Contents ============================================================== 2 @profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn 3 #desea perfilarse con line_profiler 4 def Rcf4(a,b,n): 5 """ 6 Compute numerical approximation using rectangle or mid-point method in 7 an interval. 
8 Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n 9 Mid point is calculated via formula: x_{i-1}+(x_i-x_{i-1})/2 for i=1,...,n-1 to avoid rounding errors 10 Args: 11 f (lambda expression): lambda expression of integrand 12 a (int): left point of interval 13 b (int): right point of interval 14 n (int): number of subintervals 15 Returns: 16 Rcf4 (float) 17 """ 18 1 2.0 2.0 0.0 h_hat=(b-a)/n 19 1 5.0 5.0 0.0 nodes=(a+(i+1/2)*h_hat for i in range(0,n)) 20 1 774051.0 774051.0 100.0 suma_res = sum(((math.exp(-node**2) for node in nodes))) 21 1 2.0 2.0 0.0 return h_hat*suma_res ###Markdown Observese en el output de `CProfile` siguiente para la funciรณn `Rcf4` que las lรญneas con mayor gasto en el tiempo total son: ``` nodes=(a+(i+1/2)*h_hat for i in range(0,n)) suma_res = sum(((math.exp(-node**2) for node in nodes)))``` ###Code import math def Rcf4(a,b,n): """ Compute numerical approximation using rectangle or mid-point method in an interval. Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n Mid point is calculated via formula: x_{i-1}+(x_i-x_{i-1})/2 for i=1,...,n-1 to avoid rounding errors Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf4 (float) """ h_hat=(b-a)/n nodes=(a+(i+1/2)*h_hat for i in range(0,n)) suma_res = sum(((math.exp(-node**2) for node in nodes))) return h_hat*suma_res %prun -s cumulative Rcf4(0,1,n) ###Output ###Markdown Uso de memoria RAM Al realizar anรกlisis del uso de memoria de tu cรณdigo podemos responder preguntas como:* ยฟEs posible utilizar menos RAM al reescribir mi funciรณn para que trabaje mรกs eficientemente?* ยฟPodemos usar mรกs RAM para aprovechar mejor el uso del cachรฉ? 1)Uso de `%memit` Es equivalente a `%timeit` en el sentido que realiza una serie de repeticiones para obtener un resultado estable del bloque de cรณdigo analizado. ###Code %load_ext memory_profiler %memit? 
###Output _____no_output_____ ###Markdown Primero medimos cuรกnto RAM estรก utilizando el proceso del notebook: ###Code %memit #how much RAM this process is consuming ###Output peak memory: 119.73 MiB, increment: 0.00 MiB ###Markdown Y podemos realizar mediciones para cada una de las implementaciones de la regla del rectรกngulo: ###Code %memit -c Rcf(f,0,1,n) %memit -c Rcf2(f,0,1,n) %memit -c Rcf3(f,0,1,10**5) %memit -c Rcf4(0,1,10**5) %memit -c Rcf5(0,1,10**5) ###Output peak memory: 228.77 MiB, increment: 103.05 MiB ###Markdown El uso de `generators` nos ayuda a disminuir la cantidad de memoria RAM usada por nuestro proceso. 2) Uso de `memory_profiler` Para mediciรณn de memoria lรญnea por lรญnea utilizamos `memory_profiler`. Se ejecuta mรกs lento que `line_profiler` (entre $10$ y $100$ veces mรกs lento!) y mejora su velocidad de ejecuciรณn al instalar el paquete `psutil`. Con lรญnea de comandos se ejecuta como sigue: ###Code %%file Rcf_memory_profiler.py import math @profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn #desea perfilarse con memory_profiler def Rcf(f,a,b,n): #Rcf: rectรกngulo compuesto para f """ Compute numerical approximation using rectangle or mid-point method in an interval. 
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n and h_hat=(b-a)/n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf (float) """ h_hat=(b-a)/n nodes=[a+(i+1/2)*h_hat for i in range(0,n)] sum_res=0 for node in nodes: sum_res=sum_res+f(node) return h_hat*sum_res if __name__=="__main__": #aรฑadimos este bloque para ejecuciรณn de la funciรณn Rcf n=10**6 f=lambda x: math.exp(-x**2) print("aproximaciรณn: {:0.6e}".format(Rcf(f,0,1,n))) ###Output Writing Rcf_memory_profiler.py ###Markdown En el output siguiente se observa que la lรญnea que mรกs incrementa la cantidad de RAM alojada para el proceso que contiene la ejecuciรณn de la funciรณn `Rcf` es la creaciรณn de la lista de nodos `nodes=[a+(i+1/2)*h_hat for i in range(0,n)]`. **Cuidado:** el valor de la columna `Increment` para esta lรญnea no necesariamente indica que la lista `nodes` ocupa en memoria $512 MB$'s, sรณlo que para la alocaciรณn de la lista el proceso creciรณ en $512 MB$'s**Nota:** en el output aparece $MiB$ que son mebibytes. Aunque no se cumple que un mebibyte sea igual a un megabyte, se toma en este comentario como megabytes pues la diferencia entre estas unidades es sutil. ###Code %%bash python3 -m memory_profiler Rcf_memory_profiler.py ###Output aproximaciรณn: 7.468241e-01 Filename: Rcf_memory_profiler.py Line # Mem usage Increment Line Contents ================================================ 2 37.750 MiB 37.750 MiB @profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn 3 #desea perfilarse con memory_profiler 4 def Rcf(f,a,b,n): #Rcf: rectรกngulo compuesto para f 5 """ 6 Compute numerical approximation using rectangle or mid-point method in 7 an interval. 
8 Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n and h_hat=(b-a)/n 9 Args: 10 f (lambda expression): lambda expression of integrand 11 a (int): left point of interval 12 b (int): right point of interval 13 n (int): number of subintervals 14 Returns: 15 Rcf (float) 16 """ 17 37.750 MiB 0.000 MiB h_hat=(b-a)/n 18 69.012 MiB 0.512 MiB nodes=[a+(i+1/2)*h_hat for i in range(0,n)] 19 69.012 MiB 0.000 MiB sum_res=0 20 69.012 MiB 0.000 MiB for node in nodes: 21 69.012 MiB 0.000 MiB sum_res=sum_res+f(node) 22 69.012 MiB 0.000 MiB return h_hat*sum_res ###Markdown Como ya se habรญa notado, los generators ahorran memoria: ###Code %%file Rcf3_memory_profiler.py import math @profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn #desea perfilarse con memory_profiler def Rcf3(f,a,b,n): """ Compute numerical approximation using rectangle or mid-point method in an interval. Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf3 (float) """ h_hat=(b-a)/n nodes=(a+(i+1/2)*h_hat for i in range(0,n)) suma_res = sum((f(node) for node in nodes)) return h_hat*suma_res if __name__=="__main__": #aรฑadimos este bloque para ejecuciรณn de la funciรณn Rcf3 n=10**6 f=lambda x: math.exp(-x**2) print("aproximaciรณn: {:0.6e}".format(Rcf3(f,0,1,n))) ###Output Writing Rcf3_memory_profiler.py ###Markdown En el output siguiente el proceso que involucra la ejecuciรณn de la funciรณn `Rcf3` no incrementa el uso de memoria RAM por el uso de generators: ###Code %%bash python3 -m memory_profiler Rcf3_memory_profiler.py ###Output aproximaciรณn: 7.468241e-01 Filename: Rcf3_memory_profiler.py Line # Mem usage Increment Line Contents ================================================ 2 37.590 MiB 37.590 MiB @profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn 3 #desea 
perfilarse con memory_profiler 4 def Rcf3(f,a,b,n): 5 """ 6 Compute numerical approximation using rectangle or mid-point method in 7 an interval. 8 Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n 9 Args: 10 f (lambda expression): lambda expression of integrand 11 a (int): left point of interval 12 b (int): right point of interval 13 n (int): number of subintervals 14 Returns: 15 Rcf3 (float) 16 """ 17 37.590 MiB 0.000 MiB h_hat=(b-a)/n 18 37.590 MiB 0.000 MiB nodes=(a+(i+1/2)*h_hat for i in range(0,n)) 19 37.590 MiB 0.000 MiB suma_res = sum((f(node) for node in nodes)) 20 37.590 MiB 0.000 MiB return h_hat*suma_res ###Markdown 3) Uso de heapy Con `heapy` podemos revisar el nรบmero y tamaรฑo de cada objeto que estรก en el heap de Python (ver [liga](https://docs.python.org/3/c-api/memory.html) y [liga2](https://stackoverflow.com/questions/14546178/does-python-have-a-stack-heap-and-how-is-memory-managed) para memory management). Tambiรฉn ayuda a encontrar **memory leaks** que ocurren si apuntamos a un objeto al que ya no deberรญamos estar apuntando... ver [liga3](https://en.wikipedia.org/wiki/Memory_leak) para saber quรฉ son las memory leaks. ###Code import math from guppy import hpy def Rcf(f,a,b,n): #Rcf: rectรกngulo compuesto para f """ Compute numerical approximation using rectangle or mid-point method in an interval. 
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n and h_hat=(b-a)/n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf (float) """ hp=hpy() h_hat=(b-a)/n h=hp.heap() print("beginning of Rcf") print(h) nodes=[a+(i+1/2)*h_hat for i in range(0,n)] h=hp.heap() print("After creating list") print(h) sum_res=0 for node in nodes: sum_res=sum_res+f(node) h=hp.heap() print("After loop") print(h) return h_hat*sum_res Rcf(f,0,1,n) import math from guppy import hpy def Rcf3(f,a,b,n): """ Compute numerical approximation using rectangle or mid-point method in an interval. Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf3 (float) """ hp=hpy() h_hat=(b-a)/n h=hp.heap() print("beginning of Rcf3") print(h) nodes=(a+(i+1/2)*h_hat for i in range(0,n)) h=hp.heap() print("After creating generator") print(h) suma_res = sum((f(node) for node in nodes)) h=hp.heap() print("After loop") print(h) return h_hat*suma_res Rcf3(f,0,1,n) ###Output beginning of Rcf3 Partition of a set of 451930 objects. Total size = 56178992 bytes. Index Count % Size % Cumulative % Kind (class / dict of class) 0 126664 28 17317847 31 17317847 31 str 1 123040 27 9506016 17 26823863 48 tuple 2 54025 12 4265717 8 31089580 55 bytes 3 27255 6 3942936 7 35032516 62 types.CodeType 4 25716 6 3497376 6 38529892 69 function 5 3155 1 3112744 6 41642636 74 type 6 6819 2 2830712 5 44473348 79 dict (no owner) 7 1244 0 1935072 3 46408420 83 dict of module 8 3155 1 1578376 3 47986796 85 dict of type 9 2286 1 846912 2 48833708 87 set <1047 more rows. Type e.g. '_.more' to view.> After creating generator Partition of a set of 451952 objects. Total size = 56180784 bytes. 
Index Count % Size % Cumulative % Kind (class / dict of class) 0 126664 28 17317847 31 17317847 31 str 1 123041 27 9506072 17 26823919 48 tuple 2 54025 12 4265717 8 31089636 55 bytes 3 27255 6 3942936 7 35032572 62 types.CodeType 4 25716 6 3497376 6 38529948 69 function 5 3155 1 3112744 6 41642692 74 type 6 6820 2 2830952 5 44473644 79 dict (no owner) 7 1244 0 1935072 3 46408716 83 dict of module 8 3155 1 1578376 3 47987092 85 dict of type 9 2286 1 846912 2 48834004 87 set <1049 more rows. Type e.g. '_.more' to view.> After loop Partition of a set of 451944 objects. Total size = 56179648 bytes. Index Count % Size % Cumulative % Kind (class / dict of class) 0 126664 28 17317847 31 17317847 31 str 1 123040 27 9506016 17 26823863 48 tuple 2 54025 12 4265717 8 31089580 55 bytes 3 27255 6 3942936 7 35032516 62 types.CodeType 4 25716 6 3497376 6 38529892 69 function 5 3155 1 3112744 6 41642636 74 type 6 6819 2 2830712 5 44473348 79 dict (no owner) 7 1244 0 1935072 3 46408420 83 dict of module 8 3155 1 1578376 3 47986796 85 dict of type 9 2286 1 846912 2 48833708 87 set <1047 more rows. Type e.g. '_.more' to view.> ###Markdown Esta nota utiliza mรฉtodos vistos en [1.5.Integracion_numerica](https://github.com/ITAM-DS/analisis-numerico-computo-cientifico/blob/master/temas/I.computo_cientifico/1.5.Integracion_numerica.ipynb) **Notas para contenedor de docker:** Comando de docker para ejecuciรณn de la nota de forma local:nota: cambiar `` por la ruta de directorio que se desea mapear a `/datos` dentro del contenedor de docker.```docker run --rm -v :/datos --name jupyterlab_numerical -p 8888:8888 -p 8786:8786 -p 8787:8787 -d palmoreck/jupyterlab_numerical:1.1.0```Detener el contenedor de docker:```docker stop jupyterlab_local``` Documentaciรณn de la imagen de docker `palmoreck/jupyterlab_numerical:1.1.0` en [liga](https://github.com/palmoreck/dockerfiles/tree/master/jupyterlab/numerical). 
--- Instalamos las herramientas que nos ayudarรกn al perfilamiento: ###Code %pip install -q --user line_profiler %pip install -q --user memory_profiler %pip install -q --user psutil %pip install -q --user guppy3 ###Output Note: you may need to restart the kernel to use updated packages. ###Markdown La siguiente celda reiniciarรก el kernel de **IPython** para cargar los paquetes instalados en la celda anterior. Dar **Ok** en el mensaje que salga y continuar con el contenido del notebook. ###Code import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) import math from scipy.integrate import quad ###Output _____no_output_____ ###Markdown Perfilamiento en Python En esta nota revisamos algunas herramientas de Python para perfilamiento de cรณdigo: uso de cpu y memoria.Mediciรณn de tiempos con:* Mรณdulo [time](https://docs.python.org/3/library/time.htmltime.time) de Python.* [%time](https://ipython.readthedocs.io/en/stable/interactive/magics.htmlmagic-time) de comandos de magic <- esta herramienta es sรณlo para medir tiempos de un statement y sรณlo la coloco para referencia pero no se usarรก en la nota.* [/usr/bin/time](https://en.wikipedia.org/wiki/Time_(Unix)) de `Unix`.* [%timeit](https://ipython.readthedocs.io/en/stable/interactive/magics.htmlmagic-timeit) de comandos de magic.Perfilamiento:* De CPU con: [line_profiler](https://pypi.org/project/line-profiler/), [CProfile](https://docs.python.org/2/library/profile.html) que es `built-in` en la *standard-library* de Python.* De memoria con: [memory_profiler](https://pypi.org/project/memory-profiler/) y [heapy](https://pypi.org/project/guppy/). Mediciรณn de tiempos El primer acercamiento que usamos en la nota para perfilar nuestro cรณdigo es identificar quรฉ es lento, otras mediciones son la cantidad de RAM, el I/O en disco o network. 
1) Uso de `time` ###Code import time ###Output _____no_output_____ ###Markdown Regla compuesta del rectรกngulo **Ejemplo de implementaciรณn de regla compuesta de rectรกngulo: usando math** Utilizar la regla compuesta del rectรกngulo para aproximar la integral $\int_0^1e^{-x^2}dx$ con $10^6$ subintervalos. ###Code f=lambda x: math.exp(-x**2) #using math library def Rcf(f,a,b,n): #Rcf: rectรกngulo compuesto para f """ Compute numerical approximation using rectangle or mid-point method in an interval. Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n and h_hat=(b-a)/n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf (float) """ h_hat=(b-a)/n nodes=[a+(i+1/2)*h_hat for i in range(0,n)] sum_res=0 for node in nodes: sum_res=sum_res+f(node) return h_hat*sum_res n=10**6 start_time = time.time() aprox=Rcf(f,0,1,n) end_time = time.time() secs = end_time-start_time print("Rcf tomรณ",secs,"segundos" ) ###Output Rcf tomรณ 0.5433993339538574 segundos ###Markdown **Obs:** recuรฉrdese que hay que evaluar que se estรฉ resolviendo correctamente el problema. En este caso el error relativo nos ayuda ###Code def err_relativo(aprox, obj): return math.fabs(aprox-obj)/math.fabs(obj) #obsรฉrvese el uso de la librerรญa math obj, err = quad(f, 0, 1) err_relativo(aprox,obj) ###Output _____no_output_____ ###Markdown **Comentarios:*** Tรณmese en cuenta que al medir tiempos de ejecuciรณn, siempre hay variaciรณn en la mediciรณn. Tal variaciรณn es normal.* Considรฉrese que la mรกquina en la que se estรกn corriendo las pruebas puede estar realizando otras tareas mientras se ejecuta el cรณdigo, por ejemplo acceso a la red, al disco o a la RAM. 
Por ello, son factores que pueden causar variaciรณn en el tiempo de ejecuciรณn del programa.* Si se van a realizar reportes de tiempos, es importante indicar las caracterรญsticas de la mรกquina en la que se estรกn haciendo las pruebas, p.ej: Dell E6420 con un procesador Intel Core I7-2720QM (2.20 GHz, 6 MB cache, Quad Core) y 8 GB de RAM en un Ubuntu $13.10$. 2) Uso de `/usr/bin/time` de Unix Para la lรญnea de comando `/usr/bin/time` primero escribimos el siguiente archivo en la ruta donde se encuentra este notebook con la lรญnea de comando magic `%file` ###Code %%file Rcf.py import math def Rcf(f,a,b,n): #Rcf: rectรกngulo compuesto para f """ Compute numerical approximation using rectangle or mid-point method in an interval. Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n and h_hat=(b-a)/n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf (float) """ h_hat=(b-a)/n nodes=[a+(i+1/2)*h_hat for i in range(0,n)] sum_res=0 for node in nodes: sum_res=sum_res+f(node) return h_hat*sum_res if __name__=="__main__": #aรฑadimos este bloque para ejecuciรณn de la funciรณn Rcf n=10**6 f=lambda x: math.exp(-x**2) print("aproximaciรณn: {:0.6e}".format(Rcf(f,0,1,n))) ###Output Overwriting Rcf.py ###Markdown Ver [liga](https://stackoverflow.com/questions/419163/what-does-if-name-main-do) y [liga2](https://es.stackoverflow.com/questions/32165/qu%C3%A9-es-if-name-main) para saber quรฉ es lo que hace la lรญnea `if __name__ == "__main__":` Lo siguiente es necesario si no tienen instalado el comando `/usr/bin/time`: ###Code %%bash sudo apt-get install time %%bash /usr/bin/time -p python3 Rcf.py #la p es de portabilidad, #ver: http://manpages.ubuntu.com/manpages/xenial/man1/time.1.html #para mayor informaciรณn ###Output aproximaciรณn: 7.468241e-01 ###Markdown **Comentarios:*** `real` que mide el wall clock o elapsed time.* `user` que mide la cantidad 
de tiempo de tu ejecuciรณn que la CPU gastรณ para funciones que no estรกn relacionadas con el kernel del sistema.* `sys` que mide la cantidad de tiempo de tu ejecuciรณn que la CPU gastรณ en funciones a nivel de kernel del sistema.**Obs:** Una funciรณn relacionada con el kernel del sistema es el alojamiento de memoria al crear una variable.* La ventaja de `/usr/bin/time` es que no es especรญfico de Python.* Este comando incluye el tiempo que le toma al sistema iniciar el ejecutable de python (que puede ser significativo si se inician muchos procesos vs un sรณlo proceso). En el caso de tener short-running scripts donde el tiempo de inicio es significativo del tiempo total entonces `/usr/bin/time` puede ser una medida รบtil.**Nota:** Si se suma `user` con `sys` se tiene una idea de cuรกnto tiempo se gastรณ en la CPU y la diferencia entre este resultado y `real` da una idea de cuรกnto tiempo se gastรณ para I/O o tambiรฉn puede dar una idea de la cantidad de tiempo que se ocupรณ el sistema en correr otras tareas. * Se puede utilizar la flag `verbose` para obtener mรกs informaciรณn: ###Code %%bash /usr/bin/time --verbose python3 Rcf.py ###Output aproximaciรณn: 7.468241e-01 ###Markdown y una explicaciรณn (breve) del output se puede encontrar [aquรญ](http://manpages.ubuntu.com/manpages/xenial/man1/time.1.html). Para el caso de `Major (requiring I/O)` nos interesa que sea $0$ pues indica que el sistema operativo tiene que cargar pรกginas de datos del disco pues tales datos ya no residen en RAM (por alguna razรณn). 3) Uso de `%timeit` El mรณdulo de `timeit` es otra forma de medir el tiempo de ejecuciรณn en la CPU.**Nota:** el mรณdulo de `timeit` desabilita temporalmente el garbage collector* de Python (esto es, no habrรก desalojamiento en memoria de objetos de Python que no se utilicen). Si el garbage collector es invocado en tus operaciones para un ejemplo del mundo real, esto puede ser una razรณn de posibles diferencias que obtengas en las mediciones de tiempo. 
*sugiero buscar quรฉ es el garbage collector en blogs, por ejemplo: [liga](https://rushter.com/blog/python-garbage-collector/) o [liga2](https://stackify.com/python-garbage-collection/) o [liga3](https://stackoverflow.com/questions/4484167/python-garbage-collector-documentation). ###Code %timeit? %timeit -n 5 -r 10 Rcf(f,0,1,n) ###Output 407 ms ยฑ 37.3 ms per loop (mean ยฑ std. dev. of 10 runs, 5 loops each) ###Markdown para este caso se estรก ejecutando la funciรณn `Rcf` en un loop de tamaรฑo $5$, se estรกn promediando los tiempos de las $5$ ejecuciones y calculando su desviaciรณn estรกndar y al repetir esto $10$ veces se estรก reportando el mejor resultado. $ms$ es milisecond, $\mu s$ es microsecond y $ns$ es nanosecond. **Comentarios:*** `timeit` se recomienda usar para secciones de cรณdigo pequeรฑas. Para secciones mรกs grandes tรญpicamente modificar el valor de $n$ (ejecutar el cรณdigo n veces en un loop) resulta en mediciones distintas.* Ejecuta `timeit` varias ocasiones para asegurarse que se obtienen tiempos similares. Si observas una gran variaciรณn en las mediciones de tiempo entre distintas repeticiones de `timeit`, realiza mรกs repeticiones hasta tener un resultado estable. Mediciรณn de uso de CPU 1) Uso de cProfile `cProfile` es una herramienta **built-in** en la standard library para perfilamiento. Se utiliza con la implementaciรณn `CPython` de `Python` (ver [liga](https://stackoverflow.com/questions/17130975/python-vs-cpython) para explicaciรณn de implementaciones de Python) para medir el tiempo de ejecuciรณn de cada funciรณn en el programa. Se ejecuta desde la lรญnea de comandos o con un comando de magic. La flag `-s` indica que se ordene el resultado por el tiempo acumulado dentro de cada funciรณn. El output siguiente de `cProfile` muestra:* El tiempo total de ejecuciรณn, el cual incluye el tiempo del bloque de cรณdigo que estamos midiendo y el overhead al usar `cProfile`. 
Por esta razรณn se tiene un mayor tiempo de ejecuciรณn que con las mediciones de tiempo anteriores.* La columna `ncalls` que como el nombre indica, muestra el nรบmero de veces que se llamรณ a cada funciรณn. En este caso las funciones `lambda` y `math.exp` son las que se llaman un mayor nรบmero de veces: $n=10^6$ veces. La columna`tottime` muestra el tiempo que tardaron estas funciones en ejecutarse (sin llamar a otras funciones).* La columna `percall` es el cociente entre `tottime` y `ncalls`.* La columna `cumtime` contiene el tiempo gastado en la funciรณn y en las demรกs que llama. Por ejemplo la funciรณn `Rcf` llama a `listcomp` por lo que es natural que `Rcf` estรฉ mรกs arriba en el output ordenado de `cProfile`. Esto tambiรฉn ocurre con `lambda` y `math.exp` pues la primera llama a la segunda.* La columna de `percall` es un cociente entre la columna `cumtime` y el llamado a primitivas.* La รบltima columna indica informaciรณn de la funciรณn y la lรญnea en la que se encuentra dentro del cรณdigo. Por ejemplo la lรญnea $1$ de mรณdulo es el llamado a la funciรณn `__main__`. La lรญnea $2$ es el llamado a la funciรณn `Rcf`. Por lo que es prรกcticamente negligible el llamado a `__main__`. 
###Code %%bash python3 -m cProfile -s cumulative Rcf.py ###Output aproximaciรณn: 7.468241e-01 2000068 function calls in 0.638 seconds Ordered by: cumulative time ncalls tottime percall cumtime percall filename:lineno(function) 1 0.000 0.000 0.638 0.638 {built-in method builtins.exec} 1 0.013 0.013 0.638 0.638 Rcf.py:1(<module>) 1 0.152 0.152 0.625 0.625 Rcf.py:2(Rcf) 1000000 0.250 0.000 0.342 0.000 Rcf.py:23(<lambda>) 1 0.130 0.130 0.130 0.130 Rcf.py:16(<listcomp>) 1000000 0.093 0.000 0.093 0.000 {built-in method math.exp} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:728(create_module) 2 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed) 1 0.000 0.000 0.000 0.000 {built-in method _imp.create_builtin} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock) 1 0.000 0.000 0.000 0.000 {method 'format' of 'str' objects} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec) 1 0.000 0.000 0.000 0.000 {built-in method builtins.print} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:58(__init__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:78(acquire) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:151(__exit__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:103(release) 1 0.000 0.000 
0.000 0.000 <frozen importlib._bootstrap>:232(_requires_builtin_wrapper) 4 0.000 0.000 0.000 0.000 {built-in method builtins.getattr} 4 0.000 0.000 0.000 0.000 {built-in method builtins.hasattr} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:369(__init__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:736(exec_module) 2 0.000 0.000 0.000 0.000 {built-in method _thread.allocate_lock} 3 0.000 0.000 0.000 0.000 {built-in method _imp.acquire_lock} 1 0.000 0.000 0.000 0.000 {built-in method builtins.any} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:416(parent) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:176(cb) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:222(_verbose_message) 2 0.000 0.000 0.000 0.000 {method 'rpartition' of 'str' objects} 3 0.000 0.000 0.000 0.000 {built-in method _imp.release_lock} 1 0.000 0.000 0.000 0.000 {built-in method _imp.is_builtin} 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:307(__init__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:311(__enter__) 4 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:321(<genexpr>) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:843(__enter__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:847(__exit__) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:143(__init__) 2 0.000 0.000 0.000 0.000 {method 'get' of 'dict' objects} 2 0.000 0.000 0.000 0.000 {built-in method _thread.get_ident} 1 0.000 0.000 0.000 0.000 {built-in method _imp.exec_builtin} 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:424(has_location) 1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:753(is_package) ###Markdown **Nota:** Recordar que el output de `CProfile` con la flag `-s cumulative` estรก ordenando por el gasto en tiempo de las funciones que son llamadas en el bloque de cรณdigo analizado. No estรก ordenando por parent functions. 
Para tener un output en el que se tenga quรฉ funciones llaman a quรฉ otras se puede utilizar lo siguiente: ###Code %%bash python3 -m cProfile -o profile.stats Rcf.py import pstats p = pstats.Stats("profile.stats") p.sort_stats("cumulative") p.print_stats() p.print_callers() ###Output Ordered by: cumulative time Function was called by... ncalls tottime cumtime {built-in method builtins.exec} <- Rcf.py:1(<module>) <- 1 0.014 0.630 {built-in method builtins.exec} Rcf.py:2(Rcf) <- 1 0.161 0.616 Rcf.py:1(<module>) Rcf.py:23(<lambda>) <- 1000000 0.250 0.342 Rcf.py:2(Rcf) Rcf.py:16(<listcomp>) <- 1 0.113 0.113 Rcf.py:2(Rcf) {built-in method math.exp} <- 1000000 0.092 0.092 Rcf.py:23(<lambda>) <frozen importlib._bootstrap>:966(_find_and_load) <- 1 0.000 0.000 Rcf.py:1(<module>) <frozen importlib._bootstrap>:936(_find_and_load_unlocked) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load) <frozen importlib._bootstrap>:651(_load_unlocked) <- 1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked) <frozen importlib._bootstrap>:564(module_from_spec) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) <frozen importlib._bootstrap>:147(__enter__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load) <frozen importlib._bootstrap>:728(create_module) <- 1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec) <frozen importlib._bootstrap>:211(_call_with_frames_removed) <- 1 0.000 0.000 <frozen importlib._bootstrap>:728(create_module) 1 0.000 0.000 <frozen importlib._bootstrap>:736(exec_module) {built-in method _imp.create_builtin} <- 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed) <frozen importlib._bootstrap>:157(_get_module_lock) <- 1 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__) <frozen importlib._bootstrap>:78(acquire) <- 1 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__) {method 'format' of 'str' objects} <- 1 0.000 0.000 Rcf.py:1(<module>) <frozen 
importlib._bootstrap>:870(_find_spec) <- 1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked) <frozen importlib._bootstrap>:707(find_spec) <- 1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec) <frozen importlib._bootstrap>:433(spec_from_loader) <- 1 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec) {built-in method builtins.print} <- 1 0.000 0.000 Rcf.py:1(<module>) <frozen importlib._bootstrap>:504(_init_module_attrs) <- 1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec) <frozen importlib._bootstrap>:58(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock) <frozen importlib._bootstrap>:318(__exit__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) <frozen importlib._bootstrap>:151(__exit__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load) {built-in method builtins.hasattr} <- 2 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader) 1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec) 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) <frozen importlib._bootstrap>:103(release) <- 1 0.000 0.000 <frozen importlib._bootstrap>:151(__exit__) <frozen importlib._bootstrap>:232(_requires_builtin_wrapper) <- 1 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader) {built-in method builtins.any} <- 1 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__) {built-in method builtins.getattr} <- 4 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs) <frozen importlib._bootstrap>:736(exec_module) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) {built-in method _imp.acquire_lock} <- 1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock) 1 0.000 0.000 <frozen importlib._bootstrap>:176(cb) 1 0.000 0.000 <frozen importlib._bootstrap>:843(__enter__) <frozen importlib._bootstrap>:369(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader) <frozen 
importlib._bootstrap>:143(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load) <frozen importlib._bootstrap>:176(cb) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load) <frozen importlib._bootstrap>:222(_verbose_message) <- 1 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__) {method 'get' of 'dict' objects} <- 1 0.000 0.000 <frozen importlib._bootstrap>:176(cb) 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load) {method 'rpartition' of 'str' objects} <- 1 0.000 0.000 <frozen importlib._bootstrap>:416(parent) 1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked) {built-in method _thread.allocate_lock} <- 2 0.000 0.000 <frozen importlib._bootstrap>:58(__init__) {built-in method _imp.release_lock} <- 1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock) 1 0.000 0.000 <frozen importlib._bootstrap>:176(cb) 1 0.000 0.000 <frozen importlib._bootstrap>:847(__exit__) {built-in method _imp.is_builtin} <- 1 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec) {built-in method _imp.exec_builtin} <- 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed) <frozen importlib._bootstrap>:307(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) <frozen importlib._bootstrap>:311(__enter__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) <frozen importlib._bootstrap>:416(parent) <- 1 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs) <frozen importlib._bootstrap>:843(__enter__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec) <frozen importlib._bootstrap>:847(__exit__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec) {built-in method _thread.get_ident} <- 1 0.000 0.000 <frozen importlib._bootstrap>:78(acquire) 1 0.000 0.000 <frozen importlib._bootstrap>:103(release) {method 'disable' of '_lsprof.Profiler' objects} <- <frozen importlib._bootstrap>:321(<genexpr>) <- 4 0.000 0.000 {built-in 
method builtins.any} <frozen importlib._bootstrap>:424(has_location) <- 1 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs) <frozen importlib._bootstrap>:753(is_package) <- 1 0.000 0.000 <frozen importlib._bootstrap>:232(_requires_builtin_wrapper) ###Markdown y podemos tambiรฉn tener la informaciรณn de a quรฉ funciones llamรณ cada funciรณn ###Code p.print_callees() ###Output Ordered by: cumulative time Function called... ncalls tottime cumtime {built-in method builtins.exec} -> 1 0.014 0.630 Rcf.py:1(<module>) Rcf.py:1(<module>) -> 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load) 1 0.161 0.616 Rcf.py:2(Rcf) 1 0.000 0.000 {built-in method builtins.print} 1 0.000 0.000 {method 'format' of 'str' objects} Rcf.py:2(Rcf) -> 1 0.113 0.113 Rcf.py:16(<listcomp>) 1000000 0.250 0.342 Rcf.py:23(<lambda>) Rcf.py:23(<lambda>) -> 1000000 0.092 0.092 {built-in method math.exp} Rcf.py:16(<listcomp>) -> {built-in method math.exp} -> <frozen importlib._bootstrap>:966(_find_and_load) -> 1 0.000 0.000 <frozen importlib._bootstrap>:143(__init__) 1 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__) 1 0.000 0.000 <frozen importlib._bootstrap>:151(__exit__) 1 0.000 0.000 <frozen importlib._bootstrap>:176(cb) 1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked) 1 0.000 0.000 {method 'get' of 'dict' objects} <frozen importlib._bootstrap>:936(_find_and_load_unlocked) -> 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked) 1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec) 1 0.000 0.000 {method 'rpartition' of 'str' objects} <frozen importlib._bootstrap>:651(_load_unlocked) -> 1 0.000 0.000 <frozen importlib._bootstrap>:307(__init__) 1 0.000 0.000 <frozen importlib._bootstrap>:311(__enter__) 1 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__) 1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec) 1 0.000 0.000 <frozen importlib._bootstrap>:736(exec_module) 1 0.000 0.000 {built-in method 
builtins.hasattr} <frozen importlib._bootstrap>:564(module_from_spec) -> 1 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs) 1 0.000 0.000 <frozen importlib._bootstrap>:728(create_module) 1 0.000 0.000 {built-in method builtins.hasattr} <frozen importlib._bootstrap>:147(__enter__) -> 1 0.000 0.000 <frozen importlib._bootstrap>:78(acquire) 1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock) <frozen importlib._bootstrap>:728(create_module) -> 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed) <frozen importlib._bootstrap>:211(_call_with_frames_removed) -> 1 0.000 0.000 {built-in method _imp.create_builtin} 1 0.000 0.000 {built-in method _imp.exec_builtin} {built-in method _imp.create_builtin} -> <frozen importlib._bootstrap>:157(_get_module_lock) -> 1 0.000 0.000 <frozen importlib._bootstrap>:58(__init__) 1 0.000 0.000 {built-in method _imp.acquire_lock} 1 0.000 0.000 {built-in method _imp.release_lock} <frozen importlib._bootstrap>:78(acquire) -> 1 0.000 0.000 {built-in method _thread.get_ident} {method 'format' of 'str' objects} -> <frozen importlib._bootstrap>:870(_find_spec) -> 1 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec) 1 0.000 0.000 <frozen importlib._bootstrap>:843(__enter__) 1 0.000 0.000 <frozen importlib._bootstrap>:847(__exit__) <frozen importlib._bootstrap>:707(find_spec) -> 1 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader) 1 0.000 0.000 {built-in method _imp.is_builtin} <frozen importlib._bootstrap>:433(spec_from_loader) -> 1 0.000 0.000 <frozen importlib._bootstrap>:232(_requires_builtin_wrapper) 1 0.000 0.000 <frozen importlib._bootstrap>:369(__init__) 2 0.000 0.000 {built-in method builtins.hasattr} {built-in method builtins.print} -> <frozen importlib._bootstrap>:504(_init_module_attrs) -> 1 0.000 0.000 <frozen importlib._bootstrap>:416(parent) 1 0.000 0.000 <frozen importlib._bootstrap>:424(has_location) 4 0.000 0.000 {built-in method builtins.getattr} <frozen 
importlib._bootstrap>:58(__init__) -> 2 0.000 0.000 {built-in method _thread.allocate_lock} <frozen importlib._bootstrap>:318(__exit__) -> 1 0.000 0.000 <frozen importlib._bootstrap>:222(_verbose_message) 1 0.000 0.000 {built-in method builtins.any} <frozen importlib._bootstrap>:151(__exit__) -> 1 0.000 0.000 <frozen importlib._bootstrap>:103(release) {built-in method builtins.hasattr} -> <frozen importlib._bootstrap>:103(release) -> 1 0.000 0.000 {built-in method _thread.get_ident} <frozen importlib._bootstrap>:232(_requires_builtin_wrapper) -> 1 0.000 0.000 <frozen importlib._bootstrap>:753(is_package) {built-in method builtins.any} -> 4 0.000 0.000 <frozen importlib._bootstrap>:321(<genexpr>) {built-in method builtins.getattr} -> <frozen importlib._bootstrap>:736(exec_module) -> 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed) {built-in method _imp.acquire_lock} -> <frozen importlib._bootstrap>:369(__init__) -> <frozen importlib._bootstrap>:143(__init__) -> <frozen importlib._bootstrap>:176(cb) -> 1 0.000 0.000 {built-in method _imp.acquire_lock} 1 0.000 0.000 {built-in method _imp.release_lock} 1 0.000 0.000 {method 'get' of 'dict' objects} <frozen importlib._bootstrap>:222(_verbose_message) -> {method 'get' of 'dict' objects} -> {method 'rpartition' of 'str' objects} -> {built-in method _thread.allocate_lock} -> {built-in method _imp.release_lock} -> {built-in method _imp.is_builtin} -> {built-in method _imp.exec_builtin} -> <frozen importlib._bootstrap>:307(__init__) -> <frozen importlib._bootstrap>:311(__enter__) -> <frozen importlib._bootstrap>:416(parent) -> 1 0.000 0.000 {method 'rpartition' of 'str' objects} <frozen importlib._bootstrap>:843(__enter__) -> 1 0.000 0.000 {built-in method _imp.acquire_lock} <frozen importlib._bootstrap>:847(__exit__) -> 1 0.000 0.000 {built-in method _imp.release_lock} {built-in method _thread.get_ident} -> {method 'disable' of '_lsprof.Profiler' objects} -> <frozen 
importlib._bootstrap>:321(<genexpr>) -> <frozen importlib._bootstrap>:424(has_location) -> <frozen importlib._bootstrap>:753(is_package) -> ###Markdown El comando de magic es `%prun`: ###Code %prun -s cumulative Rcf(f,0,1,n) ###Output ###Markdown 2) Uso de line_profiler `line_profiler` trabaja perfilando el cรณdigo de forma individual funciones lรญnea por lรญnea. La idea serรญa perfilar primero con `CProfile` al programa para identificar aquellas funciones que gastan un mayor tiempo de ejecuciรณn y posteriormente perfilarlas con `line_profiler`. **Comentario:** una buena prรกctica es guardar las diferentes versiones de tu cรณdigo cuando vas modificรกndolo para tener un registro de tus cambios. Puede ejecutarse desde la lรญnea de comandos o cargarse en IPython con el comando magic `load_ext`: ###Code %load_ext line_profiler %lprun? ###Output _____no_output_____ ###Markdown En el siguiente output:* La columna `%Time` contiene el porcentaje de tiempo gastado. En el caso que se perfila, la lรญnea`sum_res=sum_res+f(node)` es en la que mรกs porcentaje del tiempo se gasta. Seguida de la lรญnea del `for` y de la lรญnea donde se hace uso de [list comprehension](https://docs.python.org/3/tutorial/datastructures.htmllist-comprehensions) para crear a los nodos de integraciรณn numรฉrica. ###Code %lprun -f Rcf Rcf(f,0,1,n) ###Output _____no_output_____ ###Markdown Con la evidencia generada con `line_profiler` ยฟpodrรญamos escribir una funciรณn que fuera mรกs rรกpida? Lo primero que podemos hacer es utilizar un [generator](https://wiki.python.org/moin/Generators) en lugar de una lista: ###Code def Rcf2(f,a,b,n): """ Compute numerical approximation using rectangle or mid-point method in an interval. 
Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf2 (float) """ h_hat=(b-a)/n nodes=(a+(i+1/2)*h_hat for i in range(0,n)) sum_res=0 for node in nodes: sum_res=sum_res+f(node) return h_hat*sum_res ###Output _____no_output_____ ###Markdown medir con `%timeit`: ###Code %timeit -n 5 -r 10 Rcf2(f,0,1,n) ###Output 434 ms ยฑ 48.4 ms per loop (mean ยฑ std. dev. of 10 runs, 5 loops each) ###Markdown revisar que estรก correcta esta nueva implementaciรณn: ###Code err_relativo(aprox,obj) ###Output _____no_output_____ ###Markdown perfilarla con `line_profiler`: ###Code %lprun -f Rcf2 Rcf2(f,0,1,n) ###Output _____no_output_____ ###Markdown y observar que la lรญnea en la que se creaba la lista ahora es despreciable el porcentaje de tiempo que se gasta en ella. Podemos hacer una implementaciรณn que se encargue del gasto del tiempo en la lรญnea del `for`: ###Code def Rcf3(f,a,b,n): """ Compute numerical approximation using rectangle or mid-point method in an interval. Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf3 (float) """ h_hat=(b-a)/n nodes=(a+(i+1/2)*h_hat for i in range(0,n)) suma_res = sum((f(node) for node in nodes)) return h_hat*suma_res ###Output _____no_output_____ ###Markdown medir con `%timeit`: ###Code %timeit -n 5 -r 10 Rcf3(f,0,1,n) ###Output 476 ms ยฑ 78.9 ms per loop (mean ยฑ std. dev. 
of 10 runs, 5 loops each) ###Markdown revisar que estรก correcta esta nueva implementaciรณn: ###Code err_relativo(aprox,obj) ###Output _____no_output_____ ###Markdown perfilarla con `line_profiler`: ###Code %lprun -f Rcf3 Rcf3(f,0,1,n) ###Output _____no_output_____ ###Markdown y se tiene la mayorรญa del porcentaje de tiempo ahora en una sola lรญnea. Recuรฉrdese que el resultado de `Cprofile` indicรณ que se llama a la funciรณn `lambda` y `math.exp` $n=10^6$ veces. Una implementaciรณn de la regla del rectรกngulo con menor nรบmero de llamadas a funciones (y por tanto menor tiempo) serรญa: ###Code def Rcf4(a,b,n): """ Compute numerical approximation using rectangle or mid-point method in an interval. Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n Mid point is calculated via formula: x_{i-1}+(x_i-x_{i-1})/2 for i=1,...,n to avoid rounding errors Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf4 (float) """ h_hat=(b-a)/n nodes=(a+(i+1/2)*h_hat for i in range(0,n)) suma_res = sum(((math.exp(-node**2) for node in nodes))) return h_hat*suma_res %lprun -f Rcf4 Rcf4(0,1,n) %timeit -n 5 -r 10 Rcf4(0,1,n) ###Output 343 ms ยฑ 55.1 ms per loop (mean ยฑ std. dev. of 10 runs, 5 loops each) ###Markdown Si bien esta implementaciรณn es la mรกs rรกpida no es tan flexible pues estรก calculando la regla del rectรกngulo para una funciรณn definida dentro de la misma funciรณn. Si quisiรฉramos calcular la regla para otra funciรณn se tendrรญa que directamente modificar la funciรณn `Rcf` lo cual no es flexible. Aunque `Rcf4` es mรกs rรกpida preferimos `Rcf3` por su flexibilidad y menor uso de recursos (que se verรก con el `memory_profiler` mรกs adelante). 
**Ejemplo de ejecución de line_profiler desde la línea de comandos:** ###Code %%file Rcf4.py import math @profile #esta línea es necesaria para indicar que la siguiente función #desea perfilarse con line_profiler def Rcf4(a,b,n): """ Compute numerical approximation using rectangle or mid-point method in an interval. 
8 Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n 9 Mid point is calculated via formula: x_{i-1}+(x_i-x_{i-1})/2 for i=1,...,n to avoid rounding errors 10 Args: 11 f (lambda expression): lambda expression of integrand 12 a (int): left point of interval 13 b (int): right point of interval 14 n (int): number of subintervals 15 Returns: 16 Rcf4 (float) 17 """ 18 1 3.0 3.0 0.0 h_hat=(b-a)/n 19 1 5.0 5.0 0.0 nodes=(a+(i+1/2)*h_hat for i in range(0,n)) 20 1 812933.0 812933.0 100.0 suma_res = sum(((math.exp(-node**2) for node in nodes))) 21 1 2.0 2.0 0.0 return h_hat*suma_res ###Markdown Observese en el output de `CProfile` siguiente para la funciรณn `Rcf4` que las lรญneas con mayor gasto en el tiempo total son: ``` nodes=(a+(i+1/2)*h_hat for i in range(0,n)) suma_res = sum(((math.exp(-node**2) for node in nodes)))``` ###Code import math def Rcf4(a,b,n): """ Compute numerical approximation using rectangle or mid-point method in an interval. Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n Mid point is calculated via formula: x_{i-1}+(x_i-x_{i-1})/2 for i=1,...,n to avoid rounding errors Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf4 (float) """ h_hat=(b-a)/n nodes=(a+(i+1/2)*h_hat for i in range(0,n)) suma_res = sum(((math.exp(-node**2) for node in nodes))) return h_hat*suma_res %prun -s cumulative Rcf4(0,1,n) ###Output ###Markdown Uso de memoria RAM Al realizar anรกlisis del uso de memoria de tu cรณdigo podemos responder preguntas como:* ยฟEs posible utilizar menos RAM al reescribir mi funciรณn para que trabaje mรกs eficientemente?* ยฟPodemos usar mรกs RAM para aprovechar mejor el uso del cachรฉ? 1)Uso de `%memit` Es equivalente a `%timeit` en el sentido que realiza una serie de repeticiones para obtener un resultado estable del bloque de cรณdigo analizado. ###Code %load_ext memory_profiler %memit? 
###Output _____no_output_____ ###Markdown Primero medimos cuánto RAM está utilizando el proceso del notebook: ###Code %memit #how much RAM this process is consuming ###Output peak memory: 112.13 MiB, increment: 0.00 MiB ###Markdown Y podemos realizar mediciones para cada una de las implementaciones de la regla del rectángulo: ###Code %memit -c Rcf(f,0,1,n) %memit -c Rcf2(f,0,1,n) %memit -c Rcf3(f,0,1,10**5) %memit -c Rcf4(0,1,10**5) ###Output peak memory: 201.55 MiB, increment: 88.66 MiB ###Markdown El uso de `generators` nos ayuda a disminuir la cantidad de memoria RAM usada por nuestro proceso. 2) Uso de `memory_profiler` Para medición de memoria línea por línea utilizamos `memory_profiler`. Se ejecuta más lento que `line_profiler` (entre $10$ y $100$ veces más lento!) y mejora su velocidad de ejecución al instalar el paquete `psutil`. Con línea de comandos se ejecuta como sigue: ###Code %%file Rcf_memory_profiler.py import math @profile #esta línea es necesaria para indicar que la siguiente función #desea perfilarse con memory_profiler def Rcf(f,a,b,n): #Rcf: rectángulo compuesto para f """ Compute numerical approximation using rectangle or mid-point method in an interval. 
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n and h_hat=(b-a)/n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf (float) """ h_hat=(b-a)/n nodes=[a+(i+1/2)*h_hat for i in range(0,n)] sum_res=0 for node in nodes: sum_res=sum_res+f(node) return h_hat*sum_res if __name__=="__main__": #aรฑadimos este bloque para ejecuciรณn de la funciรณn Rcf n=10**6 f=lambda x: math.exp(-x**2) print("aproximaciรณn: {:0.6e}".format(Rcf(f,0,1,n))) ###Output Writing Rcf_memory_profiler.py ###Markdown En el output siguiente se observa que la lรญnea que mรกs incrementa la cantidad de RAM alojada para el proceso que contiene la ejecuciรณn de la funciรณn `Rcf` es la creaciรณn de la lista de nodos `nodes=[a+(i+1/2)*h_hat for i in range(0,n)]`. **Cuidado:** el valor de la columna `Increment` para esta lรญnea no necesariamente indica que la lista `nodes` ocupa en memoria $512 MB$'s, sรณlo que para la alocaciรณn de la lista el proceso creciรณ en $512 MB$'s**Nota:** en el output aparece $MiB$ que son mebibytes. Aunque no se cumple que un mebibyte sea igual a un megabyte, se toma en este comentario como megabytes pues la diferencia entre estas unidades es sutil. ###Code %%bash python3 -m memory_profiler Rcf_memory_profiler.py ###Output aproximaciรณn: 7.468241e-01 Filename: Rcf_memory_profiler.py Line # Mem usage Increment Line Contents ================================================ 2 37.750 MiB 37.750 MiB @profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn 3 #desea perfilarse con memory_profiler 4 def Rcf(f,a,b,n): #Rcf: rectรกngulo compuesto para f 5 """ 6 Compute numerical approximation using rectangle or mid-point method in 7 an interval. 
8 Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n and h_hat=(b-a)/n 9 Args: 10 f (lambda expression): lambda expression of integrand 11 a (int): left point of interval 12 b (int): right point of interval 13 n (int): number of subintervals 14 Returns: 15 Rcf (float) 16 """ 17 37.750 MiB 0.000 MiB h_hat=(b-a)/n 18 69.012 MiB 0.512 MiB nodes=[a+(i+1/2)*h_hat for i in range(0,n)] 19 69.012 MiB 0.000 MiB sum_res=0 20 69.012 MiB 0.000 MiB for node in nodes: 21 69.012 MiB 0.000 MiB sum_res=sum_res+f(node) 22 69.012 MiB 0.000 MiB return h_hat*sum_res ###Markdown Como ya se habรญa notado, los generators ahorran memoria: ###Code %%file Rcf3_memory_profiler.py import math @profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn #desea perfilarse con memory_profiler def Rcf3(f,a,b,n): """ Compute numerical approximation using rectangle or mid-point method in an interval. Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf3 (float) """ h_hat=(b-a)/n nodes=(a+(i+1/2)*h_hat for i in range(0,n)) suma_res = sum((f(node) for node in nodes)) return h_hat*suma_res if __name__=="__main__": #aรฑadimos este bloque para ejecuciรณn de la funciรณn Rcf3 n=10**6 f=lambda x: math.exp(-x**2) print("aproximaciรณn: {:0.6e}".format(Rcf3(f,0,1,n))) ###Output Writing Rcf3_memory_profiler.py ###Markdown En el output siguiente el proceso que involucra la ejecuciรณn de la funciรณn `Rcf3` no incrementa el uso de memoria RAM por el uso de generators: ###Code %%bash python3 -m memory_profiler Rcf3_memory_profiler.py ###Output aproximaciรณn: 7.468241e-01 Filename: Rcf3_memory_profiler.py Line # Mem usage Increment Line Contents ================================================ 2 37.590 MiB 37.590 MiB @profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn 3 #desea 
perfilarse con memory_profiler 4 def Rcf3(f,a,b,n): 5 """ 6 Compute numerical approximation using rectangle or mid-point method in 7 an interval. 8 Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n 9 Args: 10 f (lambda expression): lambda expression of integrand 11 a (int): left point of interval 12 b (int): right point of interval 13 n (int): number of subintervals 14 Returns: 15 Rcf3 (float) 16 """ 17 37.590 MiB 0.000 MiB h_hat=(b-a)/n 18 37.590 MiB 0.000 MiB nodes=(a+(i+1/2)*h_hat for i in range(0,n)) 19 37.590 MiB 0.000 MiB suma_res = sum((f(node) for node in nodes)) 20 37.590 MiB 0.000 MiB return h_hat*suma_res ###Markdown 3) Uso de heapy Con `heapy` podemos revisar el nรบmero y tamaรฑo de cada objeto que estรก en el heap de Python (ver [liga](https://docs.python.org/3/c-api/memory.html) y [liga2](https://stackoverflow.com/questions/14546178/does-python-have-a-stack-heap-and-how-is-memory-managed) para memory management). Tambiรฉn ayuda a encontrar **memory leaks** que ocurren si apuntamos a un objeto al que ya no deberรญamos estar apuntando... ver [liga3](https://en.wikipedia.org/wiki/Memory_leak) para saber quรฉ son las memory leaks. ###Code import math from guppy import hpy def Rcf(f,a,b,n): #Rcf: rectรกngulo compuesto para f """ Compute numerical approximation using rectangle or mid-point method in an interval. 
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n and h_hat=(b-a)/n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf (float) """ hp=hpy() h_hat=(b-a)/n h=hp.heap() print("beginning of Rcf") print(h) nodes=[a+(i+1/2)*h_hat for i in range(0,n)] h=hp.heap() print("After creating list") print(h) sum_res=0 for node in nodes: sum_res=sum_res+f(node) h=hp.heap() print("After loop") print(h) return h_hat*sum_res Rcf(f,0,1,n) import math from guppy import hpy def Rcf3(f,a,b,n): """ Compute numerical approximation using rectangle or mid-point method in an interval. Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n Args: f (lambda expression): lambda expression of integrand a (int): left point of interval b (int): right point of interval n (int): number of subintervals Returns: Rcf3 (float) """ hp=hpy() h_hat=(b-a)/n h=hp.heap() print("beginning of Rcf3") print(h) nodes=(a+(i+1/2)*h_hat for i in range(0,n)) h=hp.heap() print("After creating generator") print(h) suma_res = sum((f(node) for node in nodes)) h=hp.heap() print("After loop") print(h) return h_hat*suma_res Rcf3(f,0,1,n) ###Output beginning of Rcf3 Partition of a set of 451930 objects. Total size = 56178992 bytes. Index Count % Size % Cumulative % Kind (class / dict of class) 0 126664 28 17317847 31 17317847 31 str 1 123040 27 9506016 17 26823863 48 tuple 2 54025 12 4265717 8 31089580 55 bytes 3 27255 6 3942936 7 35032516 62 types.CodeType 4 25716 6 3497376 6 38529892 69 function 5 3155 1 3112744 6 41642636 74 type 6 6819 2 2830712 5 44473348 79 dict (no owner) 7 1244 0 1935072 3 46408420 83 dict of module 8 3155 1 1578376 3 47986796 85 dict of type 9 2286 1 846912 2 48833708 87 set <1047 more rows. Type e.g. '_.more' to view.> After creating generator Partition of a set of 451952 objects. Total size = 56180784 bytes. 
Index Count % Size % Cumulative % Kind (class / dict of class) 0 126664 28 17317847 31 17317847 31 str 1 123041 27 9506072 17 26823919 48 tuple 2 54025 12 4265717 8 31089636 55 bytes 3 27255 6 3942936 7 35032572 62 types.CodeType 4 25716 6 3497376 6 38529948 69 function 5 3155 1 3112744 6 41642692 74 type 6 6820 2 2830952 5 44473644 79 dict (no owner) 7 1244 0 1935072 3 46408716 83 dict of module 8 3155 1 1578376 3 47987092 85 dict of type 9 2286 1 846912 2 48834004 87 set <1049 more rows. Type e.g. '_.more' to view.> After loop Partition of a set of 451944 objects. Total size = 56179648 bytes. Index Count % Size % Cumulative % Kind (class / dict of class) 0 126664 28 17317847 31 17317847 31 str 1 123040 27 9506016 17 26823863 48 tuple 2 54025 12 4265717 8 31089580 55 bytes 3 27255 6 3942936 7 35032516 62 types.CodeType 4 25716 6 3497376 6 38529892 69 function 5 3155 1 3112744 6 41642636 74 type 6 6819 2 2830712 5 44473348 79 dict (no owner) 7 1244 0 1935072 3 46408420 83 dict of module 8 3155 1 1578376 3 47986796 85 dict of type 9 2286 1 846912 2 48833708 87 set <1047 more rows. Type e.g. '_.more' to view.>
Modulo2/2. Funciones Python.ipynb
###Markdown Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal. ###Code def mi_funcion(): # aquรญ mi codigo pass # uso del pass es opcional ###Output _____no_output_____ ###Markdown Una funciรณn, no es ejecutada hasta tanto no sea invocada. Para invocar una funciรณn, simplemente se la llama por su nombre: ###Code # defino mi funciรณn def hola(): print("Hola Mundo") # llamo mi funciรณn hola() ###Output Hola Mundo ###Markdown Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable: ###Code # Funciรณn retorna la palabra "Hola Mundo" def funcion(): return "Hola Mundo" # Almaceno el valor devuelto en una variable frase = funcion() print(frase) ###Output Hola Mundo ###Markdown Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno. ###Code def mi_funcion(nombre, apellido): # algoritmo pass ###Output _____no_output_____ ###Markdown Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. 
Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder: ###Code # Ejemplo funciรณn con parรกmetros def mi_funcion(nombre, apellido): nombre_completo = nombre, apellido print(nombre_completo) mi_funcion('gonzalo','delgado') # Ejemplo 2 def suma(a, b): # valores que se reciben return a + b a = 5 b = 6 resultado = suma(a, b) # valores que se envรญan print(resultado) ###Output 11 ###Markdown Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn ###Code # Ejemplo3 def resta(a, b): return a - b # argumento 30 => posiciรณn 0 => parรกmetro a # argumento 10 => posiciรณn 1 => parรกmetro b resta(30, 10) ###Output _____no_output_____ ###Markdown Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente: ###Code resta(b=30, a=10) ###Output _____no_output_____ ###Markdown Valores por Defecto Es posible colocar valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando ###Code def bar(x=2): x = x + 90 return x # my_var = 3 print(bar()) # pasando un valor a mi funcion print(bar(6)) ###Output 96 ###Markdown Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. 
En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores ###Code def indeterminados_posicion(*args): for arg in args: print(arg) indeterminados_posicion(5,"Hola",[1,2,3,4,5]) ###Output 5 Hola [1, 2, 3, 4, 5] ###Markdown Cuando se tiene los valores en lista ###Code # valores a ser sumados se encuentran en una lista def sumar(a,b): return a + b # lista con valores a ser sumados numeros_sumar = [23,11] print(sumar(*numeros_sumar)) ###Output 34 ###Markdown kwargs Cuando no se sabe la cantidad de valores ###Code def indeterminados_nombre(**kwargs): for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5]) ###Output n => 5 c => Hola l => [1, 2, 3, 4, 5] ###Markdown Valores contenidos en diccionario ###Code def calcular(importe, descuento): return importe - (importe * descuento / 100) datos = { "descuento": 10, "importe": 1500 } print(calcular(**datos)) ###Output 1350.0 ###Markdown Combinando ambos conceptos ###Code def super_funcion(*args,**kwargs): total = 0 for arg in args: total += arg print("sumatorio => ", total) for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) super_funcion(10, 50, -1, 1.56, 10, 20, 300, nombre="Hector", edad=27) datos_persona ={ 'nombre':'Gonzalo', 'edad': 26 } "Hola {nombre}, tu edad es {edad}".format(**datos_persona) ###Output _____no_output_____ ###Markdown Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia 
(Mutables): Listas, diccionarios, conjuntos... Paso por valor ###Code # Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato def bar(x): x = x + 90 x = 3 bar(x) print(x) ###Output 3 ###Markdown ###Code # Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos def bar(x): return x + 90 my_var = 3 my_var = bar(my_var) print(my_var) ###Output 93 ###Markdown Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera: ###Code # Valor puede def foo(x): x[0] = x[0] * 99 x.append(99) # lista original my_list = [1, 2, 3] foo(my_list) my_list ###Output _____no_output_____ ###Markdown ###Code # asi se genere una copia simple, esto no soluciona el problema my_list2 = my_list foo(my_list2) my_list2 my_list my_list.copy() # se puede solucionar realizando una copia al objeto foo(my_list.copy()) my_list my_list ###Output _____no_output_____ ###Markdown รmbito de variables en funciones----------------------------------- Ejemplo ###Code # valor de variable global 'x' se mantiene x = 7 def foo(): x = 42 print(x) # llamo a la funcion foo() print(x) # Global indica que se va a trabar con variable global por lo que cuando redefinimos a la variable, # se cambia el valor global de esta x = 7 def foo(): global x x = 42 print(x) # llamo a la funcion foo() print(x) ###Output 42 42 ###Markdown Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo. 
###Code def jugar(intento=1): respuesta = input("ยฟDe quรฉ color es una naranja? ") if respuesta.lower() != "naranja": if intento < 3: print("\nFallaste! Intรฉntalo de nuevo") intento += 1 jugar(intento) # Llamada recursiva else: print("\nPerdiste!") else: print("\nGanaste!") jugar() ###Output ยฟDe quรฉ color es una naranja? fdfsd ###Markdown Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar. ###Code def par_o_impar(): numero = int(input("Escriba un nรบmero entero: ")) if 0 > numero: print(f"Le he pedido un nรบmero entero mayor 0") else: if numero % 2 == 0: print(f"El nรบmero {numero} es par.") else: print(f"El nรบmero {numero} es impar.") par_o_impar() def par_impar(number): if (number % 2 == 0): print("El numero es par") else: print("El numero es impar") while True: try: number = int(input("Ingrese un numero")) break except: print('valor ingresado no corresponde a un numero entero') par_impar(number) ###Output El numero es par ###Markdown 2.Realiza una funciรณn llamada area_rectangulo(base, altura) que devuelva el รกrea del rectangulo a partir de una base y una altura. Calcula el รกrea de un rectรกngulo de 15 de base y 10 de altura: ###Code def area_rectangulo(base,altura): return base * altura area=area_rectangulo(15,10) print(f'El รกrea del rectรกngulo es: {area}') ###Output El รกrea del rectรกngulo es: 150 ###Markdown 3.Realiza una funciรณn llamada relacion(a, b) que a partir de dos nรบmeros cumpla lo siguiente:- Si el primer nรบmero es mayor que el segundo, debe devolver 1.- Si el primer nรบmero es menor que el segundo, debe devolver -1.- Si ambos nรบmeros son iguales, debe devolver un 0.Comprueba la relaciรณn entre los nรบmeros: '5 y 10', '10 y 5' y '5 y 5'. 
###Code def rel(a, b): if a > b: return 1 elif a < b: return -1 else: return 0 print(rel(5,10)) print(rel(10,5)) print(rel(5,5)) ###Output -1 1 0 ###Markdown 4.El factorial de un nรบmero corresponde al producto de todos los nรบmeros desde 1 hasta el propio nรบmero. Es el ejemplo con retorno mรกs utilizado para mostrar la utilidad de este tipo de funciones:- 3! = 1 x 2 x 3 = 6- 5! = 1 x 2 x 3 x 4 x 5 = 120 ###Code def factorial(numero): f = 1 if numero == 0: print(f'{numero}! = {f}') elif numero > 0: for i in range(1, numero+1): f = f * i print(f'{numero}! = {f}') elif numero < 0: print('El nรบmero ingresado no es un entero no negativo') while True: try: numero = int(input('Ingrese el nรบmero factorial: ')) break except: print('El nรบmero ingresado no es un entero') factorial(numero) def factorial(num): if num > 1: num = num * factorial(num -1) elif num==0: num= 1 return num num = float(input('Ingresa el nรบmero: ')) c = factorial(num) print(f'{c}') ###Output Ingresa el nรบmero: 4 ###Markdown Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal. ###Code def mi_funcion(): # aquรญ mi codigo pass # uso del pass es opcional ###Output _____no_output_____ ###Markdown Una funciรณn, no es ejecutada hasta tanto no sea invocada. 
Para invocar una funciรณn, simplemente se la llama por su nombre: ###Code # defino mi funciรณn def hola(): print("Hola Mundo") # llamo mi funciรณn hola() ###Output Hola Mundo ###Markdown Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable: ###Code # Funciรณn retorna la palabra "Hola Mundo" def funcion(): return "Hola Mundo" # Almaceno el valor devuelto en una variable frase = funcion() print(frase) ###Output Hola Mundo ###Markdown Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno. ###Code def mi_funcion(nombre, apellido): # algoritmo pass ###Output _____no_output_____ ###Markdown Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder: ###Code # Ejemplo funciรณn con parรกmetros def mi_funcion(nombre, apellido): nombre_completo = nombre, apellido print(nombre_completo) mi_funcion('gonzalo','delgado') mi_funcion() # Ejemplo 2 def suma(a, b): # valores que se reciben return a + b a = 5 b = 6 resultado = suma(a, b) # valores que se envรญan print(resultado) ###Output 11 ###Markdown Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn ###Code # Ejemplo3 def resta(a, b): return a - b # argumento 30 => posiciรณn 0 => parรกmetro a # argumento 10 => posiciรณn 1 => parรกmetro b resta(30, 10) ###Output _____no_output_____ ###Markdown Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente: ###Code resta(b=30, a=10) ###Output _____no_output_____ ###Markdown Valores por Defecto Es posible colocar 
valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando ###Code def bar(x=2): x = x + 90 return x # my_var = 3 print(bar()) # pasando un valor a mi funcion print(bar(6)) ###Output 96 ###Markdown Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores ###Code def indeterminados_posicion(*args): for arg in args: print(arg) indeterminados_posicion(5,"Hola",[1,2,3,4,5]) ###Output 5 Hola [1, 2, 3, 4, 5] ###Markdown Cuando se tiene los valores en lista ###Code # valores a ser sumados se encuentran en una lista def sumar(a,b): return a + b # lista con valores a ser sumados numeros_sumar = [23,11] print(sumar(*numeros_sumar)) ###Output 34 ###Markdown kwargs Cuando no se sabe la cantidad de valores ###Code def indeterminados_nombre(**kwargs): for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5]) ###Output n => 5 c => Hola l => [1, 2, 3, 4, 5] ###Markdown Valores contenidos en diccionario ###Code def calcular(importe, descuento): return importe - (importe * descuento / 100) datos = { "descuento": 10, "importe": 1500 } print(calcular(**datos)) ###Output 1350.0 ###Markdown Combinando ambos conceptos ###Code def super_funcion(*args,**kwargs): total = 0 for arg in args: total += arg print("sumatorio => ", total) for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) super_funcion(10, 50, -1, 1.56, 10, 20, 300, nombre="Hector", edad=27) datos_persona ={ 'nombre':'Gonzalo', 'edad': 26 } "Hola {nombre}, tu edad es {edad}".format(**datos_persona) ###Output _____no_output_____ ###Markdown Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos 
comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia (Mutables): Listas, diccionarios, conjuntos... Paso por valor ###Code # Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato def bar(x): x = x + 90 x = 3 bar(x) print(x) ###Output 3 ###Markdown ###Code # Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos def bar(x): return x + 90 my_var = 3 my_var = bar(my_var) print(my_var) ###Output 93 ###Markdown Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera: ###Code # Valor puede def foo(x): x[0] = x[0] * 99 x.append(99) # lista original my_list = [1, 2, 3] foo(my_list) my_list ###Output _____no_output_____ ###Markdown ###Code # asi se genere una copia simple, esto no soluciona el problema my_list2 = my_list foo(my_list2) my_list2 my_list my_list.copy() # se puede solucionar realizando una copia al objeto foo(my_list.copy()) my_list my_list ###Output _____no_output_____ ###Markdown รmbito de variables en funciones----------------------------------- Ejemplo ###Code # valor de variable global 'x' se mantiene x = 7 def foo(): x = 42 print(x) # llamo a la funcion foo() print(x) # Global indica que se va a trabar con variable global por lo que cuando redefinimos a la variable, # se cambia el valor global de esta x = 7 def foo(): global x x = 42 print(x) # llamo a la funcion foo() print(x) ###Output 42 42 ###Markdown Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas 
durante su propia ejecuciรณn. Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo. ###Code def jugar(intento=1): respuesta = input("ยฟDe quรฉ color es una naranja? ") if respuesta.lower() != "naranja": if intento < 3: print("\nFallaste! Intรฉntalo de nuevo") intento += 1 jugar(intento) # Llamada recursiva else: print("\nPerdiste!") else: print("\nGanaste!") jugar() ###Output ยฟDe quรฉ color es una naranja? fdfsd ###Markdown Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar. ###Code def par_o_impar(): numero = int(input("Escriba un nรบmero entero: ")) if 0 > numero: print(f"Le he pedido un nรบmero entero mayor 0") else: if numero % 2 == 0: print(f"El nรบmero {numero} es par.") else: print(f"El nรบmero {numero} es impar.") par_o_impar() def par_impar(number): if (number % 2 == 0): print("El numero es par") else: print("El numero es impar") while True: try: number = int(input("Ingrese un numero")) break except: print('valor ingresado no corresponde a un numero entero') par_impar(number) ###Output El numero es par ###Markdown 2.Realiza una funciรณn llamada area_rectangulo(base, altura) que devuelva el รกrea del rectangulo a partir de una base y una altura. 
Calcula el รกrea de un rectรกngulo de 15 de base y 10 de altura: ###Code def area_rectangulo(base,altura): return base * altura area=area_rectangulo(15,10) print(f'El รกrea del rectรกngulo es: {area}') ###Output El รกrea del rectรกngulo es: 150 ###Markdown 3.Realiza una funciรณn llamada relacion(a, b) que a partir de dos nรบmeros cumpla lo siguiente:- Si el primer nรบmero es mayor que el segundo, debe devolver 1.- Si el primer nรบmero es menor que el segundo, debe devolver -1.- Si ambos nรบmeros son iguales, debe devolver un 0.Comprueba la relaciรณn entre los nรบmeros: '5 y 10', '10 y 5' y '5 y 5'. ###Code def rel(a, b): if a > b: return 1 elif a < b: return -1 else: return 0 print(rel(5,10)) print(rel(10,5)) print(rel(5,5)) ###Output -1 1 0 ###Markdown 4.El factorial de un nรบmero corresponde al producto de todos los nรบmeros desde 1 hasta el propio nรบmero. Es el ejemplo con retorno mรกs utilizado para mostrar la utilidad de este tipo de funciones:- 3! = 1 x 2 x 3 = 6- 5! = 1 x 2 x 3 x 4 x 5 = 120 ###Code def factorial(numero): f = 1 if numero == 0: print(f'{numero}! = {f}') elif numero > 0: for i in range(1, numero+1): f = f * i print(f'{numero}! = {f}') elif numero < 0: print('El nรบmero ingresado no es un entero no negativo') while True: try: numero = int(input('Ingrese el nรบmero factorial: ')) break except: print('El nรบmero ingresado no es un entero') factorial(numero) def factorial(num): if num > 1: num = num * factorial(num -1) elif num==0: num= 1 return num num = float(input('Ingresa el nรบmero: ')) c = factorial(num) print(f'{c}') ###Output Ingresa el nรบmero: 4 ###Markdown Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal. ###Code def mi_funcion(): # aquรญ mi codigo pass # uso del pass es opcional ###Output _____no_output_____ ###Markdown Una funciรณn, no es ejecutada hasta tanto no sea invocada. 
Para invocar una funciรณn, simplemente se la llama por su nombre: ###Code # defino mi funciรณn def hola(): print("Hola Mundo") # llamo mi funciรณn hola() ###Output Hola Mundo ###Markdown Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable: ###Code # Funciรณn retorna la palabra "Hola Mundo" def funcion(): return "Hola Mundo" # Almaceno el valor devuelto en una variable frase = funcion() print(frase) ###Output Hola Mundo ###Markdown Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno. ###Code def mi_funcion(nombre, apellido): # algoritmo pass ###Output _____no_output_____ ###Markdown Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder: ###Code # Ejemplo funciรณn con parรกmetros def mi_funcion(nombre, apellido): nombre_completo = nombre, apellido print(nombre_completo) mi_funcion('gonzalo','delgado') mi_funcion() # Ejemplo 2 def suma(a, b): # valores que se reciben return a + b a = 5 b = 6 resultado = suma(a, b) # valores que se envรญan print(resultado) ###Output 11 ###Markdown Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn ###Code # Ejemplo3 def resta(a, b): return a - b # argumento 30 => posiciรณn 0 => parรกmetro a # argumento 10 => posiciรณn 1 => parรกmetro b resta(30, 10) ###Output _____no_output_____ ###Markdown Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente: ###Code resta(b=30, a=10) ###Output _____no_output_____ ###Markdown Valores por Defecto Es posible colocar 
valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando ###Code def bar(x=2): x = x + 90 return x # my_var = 3 print(bar()) # pasando un valor a mi funcion print(bar(6)) ###Output 96 ###Markdown Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores ###Code def indeterminados_posicion(*args): for arg in args: print(arg) indeterminados_posicion(5,"Hola",[1,2,3,4,5]) ###Output 5 Hola [1, 2, 3, 4, 5] ###Markdown Cuando se tiene los valores en lista ###Code # valores a ser sumados se encuentran en una lista def sumar(a,b): return a+b # lista con valores a ser sumados numeros_sumar=[23,11] print(sumar(*numeros_sumar)) ###Output 34 ###Markdown kwargs Cuando no se sabe la cantidad de valores ###Code def indeterminados_nombre(**kwargs): for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5]) ###Output n => 5 c => Hola l => [1, 2, 3, 4, 5] ###Markdown Valores contenidos en diccionario ###Code def calcular(importe, descuento): return importe - (importe * descuento / 100) datos = { "descuento": 10, "importe": 1500 } print(calcular(**datos)) ###Output 1350.0 ###Markdown Combinando ambos conceptos ###Code def super_funcion(*args,**kwargs): total = 0 for arg in args: total += arg print("sumatorio => ", total) for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) super_funcion(10, 50, -1, 1.56, 10, 20, 300, nombre="Hector", edad=27) datos_persona ={ 'nombre':'Gonzalo', 'edad': 26 } "Hola {nombre}, tu edad es {edad}".format(**datos_persona) ###Output _____no_output_____ ###Markdown Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos 
comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia (Mutables): Listas, diccionarios, conjuntos... Paso por valor ###Code # Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato def bar(x): x = x + 90 x = 3 bar(x) print(x) ###Output 3 ###Markdown ###Code # Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos def bar(x): return x + 90 my_var = 3 my_var = bar(my_var) print(my_var) ###Output 93 ###Markdown Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera: ###Code # Valor puede def foo(x): x[0] = x[0] * 99 # lista original my_list = [1, 2, 3] foo(my_list) my_list ###Output _____no_output_____ ###Markdown ###Code # asi se genere una copia simple, esto no soluciona el problema my_list2 = my_list foo(my_list2) my_list2 my_list # se puede solucionar realizando una copia al objeto foo(my_list.copy()) my_list ###Output _____no_output_____ ###Markdown รmbito de variables en funciones----------------------------------- Ejemplo ###Code # valor de variable global 'x' se mantiene x = 7 def foo(): x = 42 print(x) # llamo a la funcion foo() print(x) # Global indica que se va a trabar con variable global por lo que cuando redefinimos a la variable, # se cambia el valor global de esta x = 7 def foo(): global x x = 42 print(x) # llamo a la funcion foo() print(x) ###Output 42 42 ###Markdown Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. 
Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo. ###Code def jugar(intento=1): respuesta = input("ยฟDe quรฉ color es una naranja? ") if respuesta.lower() != "naranja": if intento < 3: print("\nFallaste! Intรฉntalo de nuevo") intento += 1 jugar(intento) # Llamada recursiva else: print("\nPerdiste!") else: print("\nGanaste!") jugar() ###Output ยฟDe quรฉ color es una naranja? asdasd ###Markdown Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal. ###Code def mi_funcion(): # aquรญ mi codigo pass # uso del pass es opcional ###Output _____no_output_____ ###Markdown Una funciรณn, no es ejecutada hasta tanto no sea invocada. Para invocar una funciรณn, simplemente se la llama por su nombre: ###Code # defino mi funciรณn def hola(): print("Hola Mundo") # llamo mi funciรณn hola() ###Output Hola Mundo ###Markdown Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable: ###Code # Funciรณn retorna la palabra "Hola Mundo" def funcion(): return "Hola Mundo" # Almaceno el valor devuelto en una variable frase = funcion() print(frase) ###Output Hola Mundo ###Markdown Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno. 
###Code def mi_funcion(nombre, apellido): # algoritmo pass ###Output _____no_output_____ ###Markdown Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder: ###Code # Ejemplo funciรณn con parรกmetros def mi_funcion(nombre, apellido): nombre_completo = nombre, apellido print(nombre_completo) mi_funcion('gonzalo','delgado') mi_funcion() # Ejemplo 2 def suma(a, b): # valores que se reciben return a + b a = 5 b = 6 resultado = suma(a, b) # valores que se envรญan print(resultado) ###Output 11 ###Markdown Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn ###Code # Ejemplo3 def resta(a, b): return a - b # argumento 30 => posiciรณn 0 => parรกmetro a # argumento 10 => posiciรณn 1 => parรกmetro b resta(30, 10) ###Output _____no_output_____ ###Markdown Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente: ###Code resta(b=30, a=10) ###Output _____no_output_____ ###Markdown Valores por Defecto Es posible colocar valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando ###Code def bar(x=2): x = x + 90 return x # my_var = 3 print(bar()) # pasando un valor a mi funcion print(bar(6)) ###Output 96 ###Markdown Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. 
En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores ###Code def indeterminados_posicion(*args): for arg in args: print(arg) indeterminados_posicion(5,"Hola",[1,2,3,4,5]) ###Output 5 Hola [1, 2, 3, 4, 5] ###Markdown Cuando se tiene los valores en lista ###Code # valores a ser sumados se encuentran en una lista def sumar(a,b): return a+b # lista con valores a ser sumados numeros_sumar=[23,11] print(sumar(*numeros_sumar)) ###Output 34 ###Markdown kwargs Cuando no se sabe la cantidad de valores ###Code def indeterminados_nombre(**kwargs): for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5]) ###Output n => 5 c => Hola l => [1, 2, 3, 4, 5] ###Markdown Valores contenidos en diccionario ###Code def calcular(importe, descuento): return importe - (importe * descuento / 100) datos = { "descuento": 10, "importe": 1500 } print(calcular(**datos)) ###Output 1350.0 ###Markdown Combinando ambos conceptos ###Code def super_funcion(*args,**kwargs): total = 0 for arg in args: total += arg print("sumatorio => ", total) for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) super_funcion(10, 50, -1, 1.56, 10, 20, 300, nombre="Hector", edad=27) datos_persona ={ 'nombre':'Gonzalo', 'edad': 26 } "Hola {nombre}, tu edad es {edad}".format(**datos_persona) ###Output _____no_output_____ ###Markdown Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia 
(Mutables): Listas, diccionarios, conjuntos... Paso por valor ###Code # Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato def bar(x): x = x + 90 x = 3 bar(x) print(x) ###Output 3 ###Markdown ###Code # Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos def bar(x): return x + 90 my_var = 3 my_var = bar(my_var) print(my_var) ###Output 93 ###Markdown Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera: ###Code # Valor puede def foo(x): x[0] = x[0] * 99 # lista original my_list = [1, 2, 3] foo(my_list) my_list ###Output _____no_output_____ ###Markdown ###Code # asi se genere una copia simple, esto no soluciona el problema my_list2 = my_list foo(my_list2) my_list2 my_list my_list.copy() # se puede solucionar realizando una copia al objeto foo(my_list.copy()) my_list my_list ###Output _____no_output_____ ###Markdown รmbito de variables en funciones----------------------------------- Ejemplo ###Code # valor de variable global 'x' se mantiene x = 7 def foo(): x = 42 print(x) # llamo a la funcion foo() print(x) # Global indica que se va a trabar con variable global por lo que cuando redefinimos a la variable, # se cambia el valor global de esta x = 7 def foo(): global x x = 42 print(x) # llamo a la funcion foo() print(x) ###Output 42 42 ###Markdown Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo. 
###Code def jugar(intento=1): respuesta = input("ยฟDe quรฉ color es una naranja? ") if respuesta.lower() != "naranja": if intento < 3: print("\nFallaste! Intรฉntalo de nuevo") intento += 1 jugar(intento) # Llamada recursiva else: print("\nPerdiste!") else: print("\nGanaste!") jugar() ###Output ยฟDe quรฉ color es una naranja? asdasd ###Markdown Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar. ###Code def par_o_impar(): numero = int(input("Escriba un nรบmero entero: ")) if 0 > numero: print(f"Le he pedido un nรบmero entero mayor 0") else: if numero % 2 == 0: print(f"El nรบmero {numero} es par.") else: print(f"El nรบmero {numero} es impar.") par_o_impar() def par_impar(number): if (number % 2 == 0): print("El numero es par") else: print("El numero es impar") while True: try: number = int(input("Ingrese un numero: ")) break except: print('valor ingresado no corresponde a un numero entero:') par_impar(number) ###Output El numero es par ###Markdown 2.Realiza una funciรณn llamada area_rectangulo(base, altura) que devuelva el รกrea del rectangulo a partir de una base y una altura. Calcula el รกrea de un rectรกngulo de 15 de base y 10 de altura: ###Code def area_rectangulo(base,altura): return base * altura area=area_rectangulo(15,10) print(f'El รกrea del rectรกngulo es: {area}') ###Output El รกrea del rectรกngulo es: 150 ###Markdown 3.Realiza una funciรณn llamada relacion(a, b) que a partir de dos nรบmeros cumpla lo siguiente:- Si el primer nรบmero es mayor que el segundo, debe devolver 1.- Si el primer nรบmero es menor que el segundo, debe devolver -1.- Si ambos nรบmeros son iguales, debe devolver un 0.Comprueba la relaciรณn entre los nรบmeros: '5 y 10', '10 y 5' y '5 y 5'. 
###Code def rel(a, b): if a > b: return 1 elif a < b: return -1 else: return 0 print(rel(5,10)) print(rel(10,5)) print(rel(5,5)) ###Output -1 1 0 ###Markdown 4.El factorial de un nรบmero corresponde al producto de todos los nรบmeros desde 1 hasta el propio nรบmero. Es el ejemplo con retorno mรกs utilizado para mostrar la utilidad de este tipo de funciones:- 3! = 1 x 2 x 3 = 6- 5! = 1 x 2 x 3 x 4 x 5 = 120 ###Code def factorial(numero): f = 1 if numero == 0: print(f'{numero}! = {f}') elif numero > 0: for i in range(1, numero+1): f = f * i print(f'{numero}! = {f}') elif numero < 0: print('El nรบmero ingresado no es un entero no negativo') while True: try: numero = int(input('Ingrese el nรบmero factorial: ')) break except: print('El nรบmero ingresado no es un entero') factorial(numero) def factorial(num): if num > 1: num = num * factorial(num -1) elif num==0: num= 1 return num num = float(input('Ingresa el nรบmero: ')) c = factorial(num) print(f'{c}') ###Output Ingresa el nรบmero: 12 479001600.0 ###Markdown Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal. ###Code def mi_funcion(): # aquรญ mi codigo pass # uso del pass es opcional ###Output _____no_output_____ ###Markdown Una funciรณn, no es ejecutada hasta tanto no sea invocada. 
Para invocar una funciรณn, simplemente se la llama por su nombre: ###Code # defino mi funciรณn def hola(): print("Hola Mundo") # llamo mi funciรณn hola() ###Output Hola Mundo ###Markdown Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable: ###Code # Funciรณn retorna la palabra "Hola Mundo" def funcion(): return "Hola Mundo" # Almaceno el valor devuelto en una variable frase = funcion() print(frase) ###Output Hola Mundo ###Markdown Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno. ###Code def mi_funcion(nombre, apellido): # algoritmo pass ###Output _____no_output_____ ###Markdown Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder: ###Code # Ejemplo funciรณn con parรกmetros def mi_funcion(nombre, apellido): nombre_completo = nombre, apellido print(nombre_completo) mi_funcion('gonzalo','delgado') mi_funcion() # Ejemplo 2 def suma(a, b): # valores que se reciben return a + b a = 5 b = 6 resultado = suma(a, b) # valores que se envรญan print(resultado) ###Output 11 ###Markdown Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn ###Code # Ejemplo3 def resta(a, b): return a - b # argumento 30 => posiciรณn 0 => parรกmetro a # argumento 10 => posiciรณn 1 => parรกmetro b resta(30, 10) ###Output _____no_output_____ ###Markdown Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente: ###Code resta(b=30, a=10) ###Output _____no_output_____ ###Markdown Valores por Defecto Es posible colocar 
valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando ###Code def bar(x=2): x = x + 90 return x # my_var = 3 print(bar()) # pasando un valor a mi funcion print(bar(6)) ###Output 96 ###Markdown Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores ###Code def indeterminados_posicion(*args): for arg in args: print(arg) indeterminados_posicion(5,"Hola",[1,2,3,4,5]) ###Output 5 Hola [1, 2, 3, 4, 5] ###Markdown Cuando se tiene los valores en lista ###Code # valores a ser sumados se encuentran en una lista def sumar(a,b): return a+b # lista con valores a ser sumados numeros_sumar=[23,11] print(sumar(*numeros_sumar)) ###Output 34 ###Markdown kwargs Cuando no se sabe la cantidad de valores ###Code def indeterminados_nombre(**kwargs): for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5]) ###Output n => 5 c => Hola l => [1, 2, 3, 4, 5] ###Markdown Valores contenidos en diccionario ###Code def calcular(importe, descuento): return importe - (importe * descuento / 100) datos = { "descuento": 10, "importe": 1500 } print(calcular(**datos)) ###Output 1350.0 ###Markdown Combinando ambos conceptos ###Code def super_funcion(*args,**kwargs): total = 0 for arg in args: total += arg print("sumatorio => ", total) for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) super_funcion(10, 50, -1, 1.56, 10, 20, 300, nombre="Hector", edad=27) datos_persona ={ 'nombre':'Gonzalo', 'edad': 26 } "Hola {nombre}, tu edad es {edad}".format(**datos_persona) ###Output _____no_output_____ ###Markdown Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos 
comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia (Mutables): Listas, diccionarios, conjuntos... Paso por valor ###Code # Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato def bar(x): x = x + 90 x = 3 bar(x) print(x) ###Output 3 ###Markdown ###Code # Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos def bar(x): return x + 90 my_var = 3 my_var = bar(my_var) print(my_var) ###Output 93 ###Markdown Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera: ###Code # Valor puede def foo(x): x[0] = x[0] * 99 # lista original my_list = [1, 2, 3] foo(my_list) my_list ###Output _____no_output_____ ###Markdown ###Code # asi se genere una copia simple, esto no soluciona el problema my_list2 = my_list foo(my_list2) my_list2 my_list # se puede solucionar realizando una copia al objeto foo(my_list.copy()) my_list ###Output _____no_output_____ ###Markdown รmbito de variables en funciones----------------------------------- Ejemplo ###Code # valor de variable global 'x' se mantiene x = 7 def foo(): x = 42 print(x) # llamo a la funcion foo() print(x) # Global indica que se va a trabar con variable global por lo que cuando redefinimos a la variable, # se cambia el valor global de esta x = 7 def foo(): global x x = 42 print(x) # llamo a la funcion foo() print(x) ###Output 42 42 ###Markdown Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. 
Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo. ###Code def jugar(intento=1): respuesta = input("ยฟDe quรฉ color es una naranja? ") if respuesta.lower() != "naranja": if intento < 3: print("\nFallaste! Intรฉntalo de nuevo") intento += 1 jugar(intento) # Llamada recursiva else: print("\nPerdiste!") else: print("\nGanaste!") jugar() ###Output ยฟDe quรฉ color es una naranja? asdasd ###Markdown Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar. ###Code def par_o_impar(n): if (n%2==0): print("El nรบmero es par") else: print("El nรบmero es impar") n=int(input("Ingrese un nรบmero")) par_o_impar(n) ###Output Ingrese un nรบmero 7 ###Markdown 2.Realiza una funciรณn llamada area_rectangulo(base, altura) que devuelva el รกrea del rectangulo a partir de una base y una altura. Calcula el รกrea de un rectรกngulo de 15 de base y 10 de altura: ###Code def area_rectangulo(base, altura): return base*altura print( area_rectangulo(15,10) ) ###Output 150 ###Markdown 3.Realiza una funciรณn llamada relacion(a, b) que a partir de dos nรบmeros cumpla lo siguiente:- Si el primer nรบmero es mayor que el segundo, debe devolver 1.- Si el primer nรบmero es menor que el segundo, debe devolver -1.- Si ambos nรบmeros son iguales, debe devolver un 0.Comprueba la relaciรณn entre los nรบmeros: '5 y 10', '10 y 5' y '5 y 5'. ###Code def rel(a, b): if a > b: return 1 elif a < b: return -1 else: return 0 print(rel(5,10)) print(rel(10,5)) print(rel(5,5)) ###Output -1 1 0 ###Markdown 4.El factorial de un nรบmero corresponde al producto de todos los nรบmeros desde 1 hasta el propio nรบmero. Es el ejemplo con retorno mรกs utilizado para mostrar la utilidad de este tipo de funciones:- 3! = 1 x 2 x 3 = 6- 5! 
= 1 x 2 x 3 x 4 x 5 = 120 ###Code def factorial(num): if num > 1: num = num * factorial(num -1) elif num==0: num= 1 return num num = int(input('Ingresa el nรบmero: ')) c = factorial(num) print(f'{c}') ###Output Ingresa el nรบmero: 5 ###Markdown Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal. ###Code def mi_funcion(): # aquรญ mi codigo pass # uso del pass es opcional ###Output _____no_output_____ ###Markdown Una funciรณn, no es ejecutada hasta tanto no sea invocada. Para invocar una funciรณn, simplemente se la llama por su nombre: ###Code # defino mi funciรณn def hola(): print("Hola Mundo") # llamo mi funciรณn hola() ###Output Hola Mundo ###Markdown Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable: ###Code # Funciรณn retorna la palabra "Hola Mundo" def funcion(): return "Hola Mundo" # Almaceno el valor devuelto en una variable frase = funcion() print(frase) ###Output Hola Mundo ###Markdown Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno. ###Code def mi_funcion(nombre, apellido): # algoritmo pass ###Output _____no_output_____ ###Markdown Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. 
Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder: ###Code # Ejemplo funciรณn con parรกmetros def mi_funcion(nombre, apellido): nombre_completo = nombre, apellido print(nombre_completo) mi_funcion('gonzalo','delgado') mi_funcion() # Ejemplo 2 def suma(a, b): # valores que se reciben return a + b a = 5 b = 6 resultado = suma(a, b) # valores que se envรญan print(resultado) ###Output 11 ###Markdown Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn ###Code # Ejemplo3 def resta(a, b): return a - b # argumento 30 => posiciรณn 0 => parรกmetro a # argumento 10 => posiciรณn 1 => parรกmetro b resta(30, 10) ###Output _____no_output_____ ###Markdown Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente: ###Code resta(b=30, a=10) ###Output _____no_output_____ ###Markdown Valores por Defecto Es posible colocar valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando ###Code def bar(x=2): x = x + 90 return x # my_var = 3 print(bar()) # pasando un valor a mi funcion print(bar(6)) ###Output 96 ###Markdown Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. 
En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores ###Code def indeterminados_posicion(*args): for arg in args: print(arg) indeterminados_posicion(5,"Hola",[1,2,3,4,5]) ###Output 5 Hola [1, 2, 3, 4, 5] ###Markdown Cuando se tiene los valores en lista ###Code # valores a ser sumados se encuentran en una lista def sumar(a,b): return a + b # lista con valores a ser sumados numeros_sumar = [23,11] print(sumar(*numeros_sumar)) ###Output 34 ###Markdown kwargs Cuando no se sabe la cantidad de valores ###Code def indeterminados_nombre(**kwargs): for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5]) ###Output n => 5 c => Hola l => [1, 2, 3, 4, 5] ###Markdown Valores contenidos en diccionario ###Code def calcular(importe, descuento): return importe - (importe * descuento / 100) datos = { "descuento": 10, "importe": 1500 } print(calcular(**datos)) ###Output 1350.0 ###Markdown Combinando ambos conceptos ###Code def super_funcion(*args,**kwargs): total = 0 for arg in args: total += arg print("sumatorio => ", total) for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) super_funcion(10, 50, -1, 1.56, 10, 20, 300, nombre="Hector", edad=27) datos_persona ={ 'nombre':'Gonzalo', 'edad': 26 } "Hola {nombre}, tu edad es {edad}".format(**datos_persona) ###Output _____no_output_____ ###Markdown Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia 
(Mutables): Listas, diccionarios, conjuntos... Paso por valor ###Code # Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato def bar(x): x = x + 90 x = 3 bar(x) print(x) ###Output 3 ###Markdown ###Code # Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos def bar(x): return x + 90 my_var = 3 my_var = bar(my_var) print(my_var) ###Output 93 ###Markdown Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera: ###Code # Valor puede def foo(x): x[0] = x[0] * 99 x.append(99) # lista original my_list = [1, 2, 3] foo(my_list) my_list ###Output _____no_output_____ ###Markdown ###Code # asi se genere una copia simple, esto no soluciona el problema my_list2 = my_list foo(my_list2) my_list2 my_list my_list.copy() # se puede solucionar realizando una copia al objeto foo(my_list.copy()) my_list my_list ###Output _____no_output_____ ###Markdown รmbito de variables en funciones----------------------------------- Ejemplo ###Code # valor de variable global 'x' se mantiene x = 7 def foo(): x = 42 print(x) # llamo a la funcion foo() print(x) # Global indica que se va a trabar con variable global por lo que cuando redefinimos a la variable, # se cambia el valor global de esta x = 7 def foo(): global x x = 42 print(x) # llamo a la funcion foo() print(x) ###Output 42 42 ###Markdown Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo. 
###Code def jugar(intento=1): respuesta = input("ยฟDe quรฉ color es una naranja? ") if respuesta.lower() != "naranja": if intento < 3: print("\nFallaste! Intรฉntalo de nuevo") intento += 1 jugar(intento) # Llamada recursiva else: print("\nPerdiste!") else: print("\nGanaste!") jugar() ###Output ยฟDe quรฉ color es una naranja? fdfsd ###Markdown Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar. ###Code def par_o_impar(): numero = int(input("Escriba un nรบmero entero: ")) if 0 > numero: print(f"Le he pedido un nรบmero entero mayor 0") else: if numero % 2 == 0: print(f"El nรบmero {numero} es par.") else: print(f"El nรบmero {numero} es impar.") par_o_impar() def par_impar(number): if (number % 2 == 0): print("El numero es par") else: print("El numero es impar") while True: try: number = int(input("Ingrese un numero")) break except: print('valor ingresado no corresponde a un numero entero') par_impar(number) ###Output El numero es par ###Markdown 2.Realiza una funciรณn llamada area_rectangulo(base, altura) que devuelva el รกrea del rectangulo a partir de una base y una altura. Calcula el รกrea de un rectรกngulo de 15 de base y 10 de altura: ###Code def area_rectangulo(base,altura): return base * altura area=area_rectangulo(15,10) print(f'El รกrea del rectรกngulo es: {area}') ###Output El รกrea del rectรกngulo es: 150 ###Markdown 3.Realiza una funciรณn llamada relacion(a, b) que a partir de dos nรบmeros cumpla lo siguiente:- Si el primer nรบmero es mayor que el segundo, debe devolver 1.- Si el primer nรบmero es menor que el segundo, debe devolver -1.- Si ambos nรบmeros son iguales, debe devolver un 0.Comprueba la relaciรณn entre los nรบmeros: '5 y 10', '10 y 5' y '5 y 5'. 
###Code def rel(a, b): if a > b: return 1 elif a < b: return -1 else: return 0 print(rel(5,10)) print(rel(10,5)) print(rel(5,5)) ###Output -1 1 0 ###Markdown 4.El factorial de un nรบmero corresponde al producto de todos los nรบmeros desde 1 hasta el propio nรบmero. Es el ejemplo con retorno mรกs utilizado para mostrar la utilidad de este tipo de funciones:- 3! = 1 x 2 x 3 = 6- 5! = 1 x 2 x 3 x 4 x 5 = 120 ###Code def factorial(numero): f = 1 if numero == 0: print(f'{numero}! = {f}') elif numero > 0: for i in range(1, numero+1): f = f * i print(f'{numero}! = {f}') elif numero < 0: print('El nรบmero ingresado no es un entero no negativo') while True: try: numero = int(input('Ingrese el nรบmero factorial: ')) break except: print('El nรบmero ingresado no es un entero') factorial(numero) def factorial(num): if num > 1: num = num * factorial(num -1) elif num==0: num= 1 return num num = float(input('Ingresa el nรบmero: ')) c = factorial(num) print(f'{c}') ###Output Ingresa el nรบmero: 4 ###Markdown Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal. ###Code def mi_funcion(): # aquรญ mi codigo pass # uso del pass es opcional ###Output _____no_output_____ ###Markdown Una funciรณn, no es ejecutada hasta tanto no sea invocada. 
Para invocar una funciรณn, simplemente se la llama por su nombre: ###Code # defino mi funciรณn def hola(): print("Hola Mundo") # llamo mi funciรณn hola() ###Output Hola Mundo ###Markdown Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable: ###Code # Funciรณn retorna la palabra "Hola Mundo" def funcion(): return "Hola Mundo" # Almaceno el valor devuelto en una variable frase = funcion() print(frase) ###Output Hola Mundo ###Markdown Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno. ###Code def mi_funcion(nombre, apellido): # algoritmo pass ###Output _____no_output_____ ###Markdown Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder: ###Code # Ejemplo funciรณn con parรกmetros def mi_funcion(nombre, apellido): nombre_completo = nombre, apellido print(nombre_completo) mi_funcion('gonzalo','delgado') mi_funcion() # Ejemplo 2 def suma(a, b): # valores que se reciben return a + b a = 5 b = 6 resultado = suma(a, b) # valores que se envรญan print(resultado) ###Output 11 ###Markdown Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn ###Code # Ejemplo3 def resta(a, b): return a - b # argumento 30 => posiciรณn 0 => parรกmetro a # argumento 10 => posiciรณn 1 => parรกmetro b resta(30, 10) ###Output _____no_output_____ ###Markdown Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente: ###Code resta(b=30, a=10) ###Output _____no_output_____ ###Markdown Valores por Defecto Es posible colocar 
valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando ###Code def bar(x=2): x = x + 90 return x # my_var = 3 print(bar()) # pasando un valor a mi funcion print(bar(6)) ###Output 96 ###Markdown Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores ###Code def indeterminados_posicion(*args): for arg in args: print(arg) indeterminados_posicion(5,"Hola",[1,2,3,4,5]) ###Output 5 Hola [1, 2, 3, 4, 5] ###Markdown Cuando se tiene los valores en lista ###Code # valores a ser sumados se encuentran en una lista def sumar(a,b): return a+b # lista con valores a ser sumados numeros_sumar=[23,11] print(sumar(*numeros_sumar)) ###Output 34 ###Markdown kwargs Cuando no se sabe la cantidad de valores ###Code def indeterminados_nombre(**kwargs): for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5]) ###Output n => 5 c => Hola l => [1, 2, 3, 4, 5] ###Markdown Valores contenidos en diccionario ###Code def calcular(importe, descuento): return importe - (importe * descuento / 100) datos = { "descuento": 10, "importe": 1500 } print(calcular(**datos)) ###Output 1350.0 ###Markdown Combinando ambos conceptos ###Code def super_funcion(*args,**kwargs): total = 0 for arg in args: total += arg print("sumatorio => ", total) for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) super_funcion(10, 50, -1, 1.56, 10, 20, 300, nombre="Hector", edad=27) datos_persona ={ 'nombre':'Gonzalo', 'edad': 26 } "Hola {nombre}, tu edad es {edad}".format(**datos_persona) ###Output _____no_output_____ ###Markdown Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos 
comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia (Mutables): Listas, diccionarios, conjuntos... Paso por valor ###Code # Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato def bar(x): x = x + 90 x = 3 bar(x) print(x) ###Output 3 ###Markdown ###Code # Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos def bar(x): return x + 90 my_var = 3 my_var = bar(my_var) print(my_var) ###Output 93 ###Markdown Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera: ###Code # Valor puede def foo(x): x[0] = x[0] * 99 # lista original my_list = [1, 2, 3] foo(my_list) my_list ###Output _____no_output_____ ###Markdown ###Code # asi se genere una copia simple, esto no soluciona el problema my_list2 = my_list foo(my_list2) my_list2 my_list # se puede solucionar realizando una copia al objeto foo(my_list.copy()) my_list ###Output _____no_output_____ ###Markdown รmbito de variables en funciones----------------------------------- Ejemplo ###Code # valor de variable global 'x' se mantiene x = 7 def foo(): x = 42 print(x) # llamo a la funcion foo() print(x) # Global indica que se va a trabar con variable global por lo que cuando redefinimos a la variable, # se cambia el valor global de esta x = 7 def foo(): global x x = 42 print(x) # llamo a la funcion foo() print(x) ###Output 42 42 ###Markdown Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. 
Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo. ###Code def jugar(intento=1): respuesta = input("ยฟDe quรฉ color es una naranja? ") if respuesta.lower() != "naranja": if intento < 3: print("\nFallaste! Intรฉntalo de nuevo") intento += 1 jugar(intento) # Llamada recursiva else: print("\nPerdiste!") else: print("\nGanaste!") jugar() ###Output ยฟDe quรฉ color es una naranja? asdasd ###Markdown Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar. ###Code def poi(a): if (a % 2 == 0): print(f'El nรบmero {a} es par') else: print(f'El nรบmero {a} es impar') a = int(input('Insertar un nรบmero: ')) poi(a) ###Output Insertar un nรบmero: 2 ###Markdown 2.Realiza una funciรณn llamada area_rectangulo(base, altura) que devuelva el รกrea del rectangulo a partir de una base y una altura. Calcula el รกrea de un rectรกngulo de 15 de base y 10 de altura: ###Code def area_rectangulo(base, altura): return base * altura area = area_rectangulo(15, 10) print(f'El รกrea del rectรกngulo es: {area}') ###Output El รกrea del rectรกngulo es: 150 ###Markdown 3.Realiza una funciรณn llamada relacion(a, b) que a partir de dos nรบmeros cumpla lo siguiente:- Si el primer nรบmero es mayor que el segundo, debe devolver 1.- Si el primer nรบmero es menor que el segundo, debe devolver -1.- Si ambos nรบmeros son iguales, debe devolver un 0.Comprueba la relaciรณn entre los nรบmeros: '5 y 10', '10 y 5' y '5 y 5'. 
###Code def relacion(a,b): if a > b: return 1 elif b < a: return -1 else: return 0 a = float(input('Ingresa el primer nรบmero: ')) b = float(input('Ingresa el segundo nรบmero: ')) c = relacion(a,b) print(f'{c}') ###Output Ingresa el primer nรบmero: 5 Ingresa el segundo nรบmero: 2 ###Markdown 4.El factorial de un nรบmero corresponde al producto de todos los nรบmeros desde 1 hasta el propio nรบmero. Es el ejemplo con retorno mรกs utilizado para mostrar la utilidad de este tipo de funciones:- 3! = 1 x 2 x 3 = 6- 5! = 1 x 2 x 3 x 4 x 5 = 120 ###Code def factorial(numero): f = 1 if numero == 0: print(f'{numero}! = {f}') elif numero > 0: for i in range(1, numero+1): f = f * i print(f'{numero}! = {f}') elif numero < 0: print('El nรบmero ingresado no es un entero no negativo') while True: try: numero = int(input('Ingrese el nรบmero factorial: ')) break except: print('El nรบmero ingresado no es un entero') factorial(numero) ###Output Ingrese el nรบmero factorial: 5 ###Markdown Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal. ###Code def mi_funcion(): # aquรญ mi codigo pass # uso del pass es opcional return NoImplementError ###Output _____no_output_____ ###Markdown Una funciรณn, no es ejecutada hasta tanto no sea invocada. 
Para invocar una funciรณn, simplemente se la llama por su nombre: ###Code # defino mi funciรณn def hola(): print("Hola Mundo") # llamo mi funciรณn hola() ###Output Hola Mundo ###Markdown Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable: ###Code # Funciรณn retorna la palabra "Hola Mundo" def funcion(): return "Hola Mundo" # Almaceno el valor devuelto en una variable frase = funcion() print(frase) ###Output Hola Mundo ###Markdown Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno. ###Code def mi_funcion(nombre, apellido): # algoritmo pass ###Output _____no_output_____ ###Markdown Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder: ###Code # Ejemplo funciรณn con parรกmetros def mi_funcion(nombre, apellido): nombre_completo = nombre+ ' ' + apellido print(nombre_completo) mi_funcion('gonzalo','delgado') mi_funcion() # Ejemplo 2 def suma(a, b): # valores que se reciben return a + b a = 5 b = 6 resultado = suma(a, b) # valores que se envรญan print(resultado) ###Output 11 ###Markdown Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn ###Code # Ejemplo3 def resta(a, b): return a - b # argumento 30 => posiciรณn 0 => parรกmetro a # argumento 10 => posiciรณn 1 => parรกmetro b resta(30, 10) ###Output _____no_output_____ ###Markdown Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente: ###Code resta(b=100, a=10) ###Output _____no_output_____ ###Markdown Valores por Defecto Es posible 
colocar valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando ###Code def bar(x=2): return x + 90 # my_var = 3 print(bar()) # pasando un valor a mi funcion print(bar(6)) ###Output 96 ###Markdown Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores ###Code def indeterminados_posicion(*args): for arg in args: print(arg) indeterminados_posicion(5,"Hola",[1,2,3,4,5],{'dia':'sabado'}) ###Output 5 Hola [1, 2, 3, 4, 5] {'dia': 'sabado'} ###Markdown Cuando se tiene los valores en lista ###Code # valores a ser sumados se encuentran en una lista def sumar(a,b): return a+b # lista con valores a ser sumados numeros_sumar=[23,11] print(sumar(*numeros_sumar)) sumar(numeros_sumar[0],numeros_sumar[1]) ###Output 34 ###Markdown kwargs Cuando no se sabe la cantidad de valores ###Code def indeterminados_nombre(**kwargs): for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5]) ###Output n => 5 c => Hola l => [1, 2, 3, 4, 5] ###Markdown Valores contenidos en diccionario ###Code def calcular(importe, descuento): return importe * (1 - descuento / 100) datos = { "descuento": 10, "importe": 1500 } print(calcular(**datos)) calcular(datos['importe'], datos['descuento']) ###Output _____no_output_____ ###Markdown Combinando ambos conceptos ###Code def super_funcion(*args,**kwargs): total = 0 for arg in args: total += arg print("sumatorio => ", total) for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) super_funcion(10, 50, -1, 1.56, 10, 20, 300,*[20,30], nombre="Hector", edad=27,**{'sueldo':1200}) datos_persona ={ 'nombre':'Gonzalo', 'edad': 26 } "Hola {nombre}, tu edad es {edad}".format(**datos_persona) datos_lista =['Gonzalo', 26] "Hola {0}, tu 
edad es {1}".format(*datos_lista) ###Output _____no_output_____ ###Markdown Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia (Mutables): Listas, diccionarios, conjuntos... Paso por valor ###Code # Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato def bar(x): x = x + 90 #print('valor de x dentro de la funcion "bar" es ', x) return x x = 3 bar(x) print('valor de x a nivel global es ', x) x = bar(x) print(x) ###Output 93 ###Markdown ###Code # Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos def bar(x): return x + 90 my_var = 3 my_var = bar(my_var) print(my_var) my_var ###Output _____no_output_____ ###Markdown Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera: ###Code # Valor puede def foo(x : list): x[0] = x[0] * 99 # lista original my_list = [1, 2, 3] foo(my_list) my_list ###Output _____no_output_____ ###Markdown ###Code # asi se genere una copia simple, esto no soluciona el problema my_list2 = my_list foo(my_list2) my_list2 my_list # se puede solucionar realizando una copia al objeto my_list = [1, 2, 3] my_list2 = my_list.copy() foo(my_list2) my_list my_list2 ###Output _____no_output_____ ###Markdown รmbito de variables en funciones----------------------------------- Ejemplo ###Code # valor de variable global 'x' se mantiene x = 7 def foo(): x = 42 print(x) # 
llamo a la funcion foo() print(x) # Global indica que se va a trabar con variable global por lo que cuando redefinimos a la variable, # se cambia el valor global de esta def foo(): global x x = 42 # reasignacion total x print('valor de x final', x) return x # llamo a la funcion x = 7 foo() print(x) ###Output valor de x final 42 42 ###Markdown Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo. ###Code def jugar(intento : int =1 ): respuesta = input("ยฟDe quรฉ color es una naranja? ") if respuesta.lower() != "naranja": if intento < 3: print("\nFallaste! Intรฉntalo de nuevo") intento += 1 jugar(intento) # Llamada recursiva else: print("\nPerdiste!") else: print("\nGanaste!") jugar() ###Output ยฟDe quรฉ color es una naranja? rojo ###Markdown Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar. ###Code num = 3 def numero_par(num): if num % 2 == 0: print(f'El numero es par {num}') else: print(f'el numero es impar {num}') # numero es par o impar? numero_par(num) ###Output el numero es impar 3 ###Markdown Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal. ###Code def mi_funcion(): # aquรญ mi codigo pass # uso del pass es opcional ###Output _____no_output_____ ###Markdown Una funciรณn, no es ejecutada hasta tanto no sea invocada. 
Para invocar una funciรณn, simplemente se la llama por su nombre: ###Code # defino mi funciรณn def hola(): print("Hola Mundo") # llamo mi funciรณn hola() ###Output Hola Mundo ###Markdown Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable: ###Code # Funciรณn retorna la palabra "Hola Mundo" def funcion(): return "Hola Mundo" # Almaceno el valor devuelto en una variable frase = funcion() print(frase) ###Output Hola Mundo ###Markdown Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno. ###Code def mi_funcion(nombre, apellido): # algoritmo pass ###Output _____no_output_____ ###Markdown Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder: ###Code # Ejemplo funciรณn con parรกmetros def mi_funcion(nombre, apellido): nombre_completo = nombre, apellido print(nombre_completo) mi_funcion('gonzalo','delgado') mi_funcion() # Ejemplo 2 def suma(a, b): # valores que se reciben return a + b a = 5 b = 6 resultado = suma(a, b) # valores que se envรญan print(resultado) ###Output 11 ###Markdown Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn ###Code # Ejemplo3 def resta(a, b): return a - b # argumento 30 => posiciรณn 0 => parรกmetro a # argumento 10 => posiciรณn 1 => parรกmetro b resta(30, 10) ###Output _____no_output_____ ###Markdown Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente: ###Code resta(b=30, a=10) ###Output _____no_output_____ ###Markdown Valores por Defecto Es posible colocar 
valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando ###Code def bar(x=2): x = x + 90 return x # my_var = 3 print(bar()) # pasando un valor a mi funcion print(bar(6)) ###Output 96 ###Markdown Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores ###Code def indeterminados_posicion(*args): for arg in args: print(arg) indeterminados_posicion(5,"Hola",[1,2,3,4,5]) ###Output 5 Hola [1, 2, 3, 4, 5] ###Markdown Cuando se tiene los valores en lista ###Code # valores a ser sumados se encuentran en una lista def sumar(a,b): return a+b # lista con valores a ser sumados numeros_sumar=[23,11] print(sumar(*numeros_sumar)) ###Output 34 ###Markdown kwargs Cuando no se sabe la cantidad de valores ###Code def indeterminados_nombre(**kwargs): for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5]) ###Output n => 5 c => Hola l => [1, 2, 3, 4, 5] ###Markdown Valores contenidos en diccionario ###Code def calcular(importe, descuento): return importe - (importe * descuento / 100) datos = { "descuento": 10, "importe": 1500 } print(calcular(**datos)) ###Output 1350.0 ###Markdown Combinando ambos conceptos ###Code def super_funcion(*args,**kwargs): total = 0 for arg in args: total += arg print("sumatorio => ", total) for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) super_funcion(10, 50, -1, 1.56, 10, 20, 300, nombre="Hector", edad=27) datos_persona ={ 'nombre':'Gonzalo', 'edad': 26 } "Hola {nombre}, tu edad es {edad}".format(**datos_persona) ###Output _____no_output_____ ###Markdown Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos 
comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia (Mutables): Listas, diccionarios, conjuntos... Paso por valor ###Code # Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato def bar(x): x = x + 90 x = 3 bar(x) print(x) ###Output 3 ###Markdown ###Code # Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos def bar(x): return x + 90 my_var = 3 my_var = bar(my_var) print(my_var) ###Output 93 ###Markdown Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera: ###Code # Valor puede def foo(x): x[0] = x[0] * 99 # lista original my_list = [1, 2, 3] foo(my_list) my_list ###Output _____no_output_____ ###Markdown ###Code # asi se genere una copia simple, esto no soluciona el problema my_list2 = my_list foo(my_list2) my_list2 my_list # se puede solucionar realizando una copia al objeto foo(my_list.copy()) my_list ###Output _____no_output_____ ###Markdown รmbito de variables en funciones----------------------------------- Ejemplo ###Code # valor de variable global 'x' se mantiene x = 7 def foo(): x = 42 print(x) # llamo a la funcion foo() print(x) # Global indica que se va a trabar con variable global por lo que cuando redefinimos a la variable, # se cambia el valor global de esta x = 7 def foo(): global x x = 42 print(x) # llamo a la funcion foo() print(x) ###Output 42 42 ###Markdown Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. 
Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo. ###Code def jugar(intento=1): respuesta = input("ยฟDe quรฉ color es una naranja? ") if respuesta.lower() != "naranja": if intento < 3: print("\nFallaste! Intรฉntalo de nuevo") intento += 1 jugar(intento) # Llamada recursiva else: print("\nPerdiste!") else: print("\nGanaste!") jugar() ###Output ยฟDe quรฉ color es una naranja? asdasd ###Markdown Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar. ###Code def es_par(numero): if numero%2==0: print("es un numero par") else: print("es un numero impar") numero = int(input('ingrese un numero: ')) es_par(numero) ###Output es un numero par ###Markdown 2.Realiza una funciรณn llamada area_rectangulo(base, altura) que devuelva el รกrea del rectangulo a partir de una base y una altura. 
Calcula el รกrea de un rectรกngulo de 15 de base y 10 de altura: ###Code def area_rectangulo(base, altura): return base * altura base = 15 altura = 10 area_rectangulo(base, altura) ###Output _____no_output_____ ###Markdown Crear una funcion para sumar elementos de una lista ###Code list_n = [20, 10, 30, 40, 50 ,60] suma = 0 for n in list_n: suma = suma + n suma ###Output _____no_output_____ ###Markdown en funcion ###Code def sumar_elemetos_lista(lista_numeros): suma = 0 for n in lista_numeros: suma = suma + n return suma list_n = [20, 10, 30, 40, 50 ,60] sumar_elemetos_lista(list_n) ###Output _____no_output_____ ###Markdown 3.Realiza una funciรณn llamada relacion(a, b) que a partir de dos nรบmeros cumpla lo siguiente:- Si el primer nรบmero es mayor que el segundo, debe devolver 1.- Si el primer nรบmero es menor que el segundo, debe devolver -1.- Si ambos nรบmeros son iguales, debe devolver un 0.Comprueba la relaciรณn entre los nรบmeros: '5 y 10', '10 y 5' y '5 y 5'. ###Code list_n = [20, 10, 30, 40, 50 ,60] ###Output _____no_output_____ ###Markdown 4.El factorial de un nรบmero corresponde al producto de todos los nรบmeros desde 1 hasta el propio nรบmero. Es el ejemplo con retorno mรกs utilizado para mostrar la utilidad de este tipo de funciones:- 3! = 1 x 2 x 3 = 6- 5! 
= 1 x 2 x 3 x 4 x 5 = 120 ###Code numero = 5 factorial = 1 for n in range(numero): factorial = factorial * (n+1) print(f'factorial de {n+1} es {factorial}') factorial ###Output factorial de 1 es 1 factorial de 2 es 2 factorial de 3 es 6 factorial de 4 es 24 factorial de 5 es 120 ###Markdown creando funcion ###Code def factorial_n(numero): factorial = 1 for n in range(numero): factorial = factorial * (n+1) print(f'factorial de {n+1} es {factorial}') return factorial factorial_n(5) ###Output factorial de 1 es 1 factorial de 2 es 2 factorial de 3 es 6 factorial de 4 es 24 factorial de 5 es 120 ###Markdown Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal. ###Code def mi_funcion(): # aquรญ mi codigo pass # uso del pass es opcional ###Output _____no_output_____ ###Markdown Una funciรณn, no es ejecutada hasta tanto no sea invocada. Para invocar una funciรณn, simplemente se la llama por su nombre: ###Code # defino mi funciรณn def hola(): print("Hola Mundo") # llamo mi funciรณn hola() ###Output Hola Mundo ###Markdown Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable: ###Code # Funciรณn retorna la palabra "Hola Mundo" def funcion(): return "Hola Mundo" # Almaceno el valor devuelto en una variable frase = funcion() print(frase) ###Output Hola Mundo ###Markdown Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno. ###Code def mi_funcion(nombre, apellido): # algoritmo pass ###Output _____no_output_____ ###Markdown Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. 
Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder: ###Code # Ejemplo funciรณn con parรกmetros def mi_funcion(nombre, apellido): nombre_completo = nombre, apellido print(nombre_completo) mi_funcion('gonzalo','delgado') mi_funcion() # Ejemplo 2 def suma(a, b): # valores que se reciben return a + b a = 5 b = 6 resultado = suma(a, b) # valores que se envรญan print(resultado) ###Output 11 ###Markdown Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn ###Code # Ejemplo3 def resta(a, b): return a - b # argumento 30 => posiciรณn 0 => parรกmetro a # argumento 10 => posiciรณn 1 => parรกmetro b resta(30, 10) ###Output _____no_output_____ ###Markdown Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente: ###Code resta(b=30, a=10) ###Output _____no_output_____ ###Markdown Valores por Defecto Es posible colocar valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando ###Code def bar(x=2): x = x + 90 return x # my_var = 3 print(bar()) # pasando un valor a mi funcion print(bar(6)) ###Output 96 ###Markdown Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. 
En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores ###Code def indeterminados_posicion(*args): for arg in args: print(arg) indeterminados_posicion(5,"Hola",[1,2,3,4,5]) ###Output 5 Hola [1, 2, 3, 4, 5] ###Markdown Cuando se tiene los valores en lista ###Code # valores a ser sumados se encuentran en una lista def sumar(a,b): return a+b # lista con valores a ser sumados numeros_sumar=[23,11] print(sumar(*numeros_sumar)) ###Output 34 ###Markdown kwargs Cuando no se sabe la cantidad de valores ###Code def indeterminados_nombre(**kwargs): for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5]) ###Output n => 5 c => Hola l => [1, 2, 3, 4, 5] ###Markdown Valores contenidos en diccionario ###Code def calcular(importe, descuento): return importe - (importe * descuento / 100) datos = { "descuento": 10, "importe": 1500 } print(calcular(**datos)) ###Output 1350.0 ###Markdown Combinando ambos conceptos ###Code def super_funcion(*args,**kwargs): total = 0 for arg in args: total += arg print("sumatorio => ", total) for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) super_funcion(10, 50, -1, 1.56, 10, 20, 300, nombre="Hector", edad=27) datos_persona ={ 'nombre':'Gonzalo', 'edad': 26 } "Hola {nombre}, tu edad es {edad}".format(**datos_persona) ###Output _____no_output_____ ###Markdown Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia 
(Mutables): Listas, diccionarios, conjuntos... Paso por valor ###Code # Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato def bar(x): x = x + 90 x = 3 bar(x) print(x) ###Output 3 ###Markdown ###Code # Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos def bar(x): return x + 90 my_var = 3 my_var = bar(my_var) print(my_var) ###Output 93 ###Markdown Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera: ###Code # Valor puede def foo(x): x[0] = x[0] * 99 # lista original my_list = [1, 2, 3] foo(my_list) my_list ###Output _____no_output_____ ###Markdown ###Code # asi se genere una copia simple, esto no soluciona el problema my_list2 = my_list foo(my_list2) my_list2 my_list # se puede solucionar realizando una copia al objeto foo(my_list.copy()) my_list ###Output _____no_output_____ ###Markdown รmbito de variables en funciones----------------------------------- Ejemplo ###Code # valor de variable global 'x' se mantiene x = 7 def foo(): x = 42 print(x) # llamo a la funcion foo() print(x) # Global indica que se va a trabar con variable global por lo que cuando redefinimos a la variable, # se cambia el valor global de esta x = 7 def foo(): global x x = 42 print(x) # llamo a la funcion foo() print(x) ###Output 42 42 ###Markdown Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo. ###Code def jugar(intento=1): respuesta = input("ยฟDe quรฉ color es una naranja? 
") if respuesta.lower() != "naranja": if intento < 3: print("\nFallaste! Intรฉntalo de nuevo") intento += 1 jugar(intento) # Llamada recursiva else: print("\nPerdiste!") else: print("\nGanaste!") jugar() ###Output ยฟDe quรฉ color es una naranja? asdasd ###Markdown Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar. ###Code num = input("Introduce un nรบmero: ") num = int(num) if num == 0: print ("Este nรบmero es par.") elif num%2 == 0: print ("Este numero es par") else: print ("Este numero es impar") ###Output _____no_output_____ ###Markdown 2.Realiza una funciรณn llamada area_rectangulo(base, altura) que devuelva el รกrea del rectangulo a partir de una base y una altura. Calcula el รกrea de un rectรกngulo de 15 de base y 10 de altura: ###Code def area_rec(b, h): return b*h print(area_rec(15,10)) ###Output 150 ###Markdown 3.Realiza una funciรณn llamada relacion(a, b) que a partir de dos nรบmeros cumpla lo siguiente:- Si el primer nรบmero es mayor que el segundo, debe devolver 1.- Si el primer nรบmero es menor que el segundo, debe devolver -1.- Si ambos nรบmeros son iguales, debe devolver un 0.Comprueba la relaciรณn entre los nรบmeros: '5 y 10', '10 y 5' y '5 y 5'. ###Code def rel(a, b): if a > b: return 1 elif a < b: return -1 else: return 0 print(rel(5,10)) print(rel(10,5)) print(rel(5,5)) def relacion(a,b): if a > b: return 1 elif b < a: return -1 elif a == b: return 0 a = float(input('Ingresa el primer nรบmero: ')) b = float(input('Ingresa el segundo nรบmero: ')) c = relacion(a,b) print(f'{c}') ###Output Ingresa el primer nรบmero: 5 Ingresa el segundo nรบmero: 10 ###Markdown 4.El factorial de un nรบmero corresponde al producto de todos los nรบmeros desde 1 hasta el propio nรบmero. Es el ejemplo con retorno mรกs utilizado para mostrar la utilidad de este tipo de funciones:- 3! = 1 x 2 x 3 = 6- 5! 
= 1 x 2 x 3 x 4 x 5 = 120 ###Code def factorial(num): if num > 1: num = num * factorial(num -1) return num num = float(input('Ingresa el nรบmero: ')) c = factorial(num) print(f'{c}') ###Output Ingresa el nรบmero: 5 ###Markdown Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal. ###Code def mi_funcion(): # aquรญ mi codigo #pass # uso del pass es opcional return NoImplementError ###Output _____no_output_____ ###Markdown Una funciรณn, no es ejecutada hasta tanto no sea invocada. Para invocar una funciรณn, simplemente se la llama por su nombre: ###Code # defino mi funciรณn def hola(): print("Hola Mundo") # llamo mi funciรณn hola() ###Output Hola Mundo ###Markdown Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable: ###Code # Funciรณn retorna la palabra "Hola Mundo" def funcion(): return "Hola Mundo" # Almaceno el valor devuelto en una variable frase = funcion() print(frase) ###Output Hola Mundo ###Markdown Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno. ###Code def mi_funcion(nombre, apellido): # algoritmo pass ###Output _____no_output_____ ###Markdown Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. 
Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder: ###Code # Ejemplo funciรณn con parรกmetros def mi_funcion(nombre, apellido): nombre_completo = "Bienvenido {} {}".format(nombre , apellido) print(nombre_completo) mi_funcion('gonzalo','delgado') mi_funcion() # Ejemplo 2 def suma(numero1, numero2): # valores que se reciben return numero1 + numero2 a = 5 b = 6 resultado = suma(a, b) # valores que se envรญan print(resultado) ###Output 11 ###Markdown Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn ###Code # Ejemplo3 def resta(a, b): return a - b # argumento 30 => posiciรณn 0 => parรกmetro a # argumento 10 => posiciรณn 1 => parรกmetro b resta(30, 10) ###Output _____no_output_____ ###Markdown Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente: ###Code print(resta(100,10)) # 100 - 10 resta(b=100, a=10) # 10 -100 ###Output 90 ###Markdown Valores por Defecto Es posible colocar valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando ###Code def bar(x=2): return x + 90 # my_var = 3 print(bar()) # pasando un valor a mi funcion print(bar(6)) ###Output 96 ###Markdown Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. 
En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores ###Code def sumar(a,b): return a +b def suma_indeterminado(*args): sumatoria = 0 for arg in args: sumatoria += arg return sumatoria listado_numeros = [23, 12,2,5,7] suma_indeterminado(23, 12,2,5,7) suma_indeterminado(*listado_numeros) def indeterminados_posicion(*args): for arg in args: print(arg) indeterminados_posicion(5,"Hola",[1,2,3,4,5],{'dia':'sabado'}) ###Output 5 Hola [1, 2, 3, 4, 5] {'dia': 'sabado'} ###Markdown Cuando se tiene los valores en lista ###Code # valores a ser sumados se encuentran en una lista def sumar(a,b): return a+b # lista con valores a ser sumados numeros_sumar=[23,11] print(sumar(*numeros_sumar)) sumar(numeros_sumar[0],numeros_sumar[1]) ###Output 34 ###Markdown kwargs Cuando no se sabe la cantidad de valores ###Code def indeterminados_nombre(**kwargs): for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5]) ###Output n => 5 c => Hola l => [1, 2, 3, 4, 5] ###Markdown Valores contenidos en diccionario ###Code def calcular(importe, descuento): return importe * (1 - descuento / 100) datos = { "descuento": 10, "importe": 1500 } print(calcular(**datos)) calcular(datos['importe'], datos['descuento']) ###Output _____no_output_____ ###Markdown Combinando ambos conceptos ###Code def super_funcion(*args,**kwargs): total = 0 for arg in args: total += arg print("sumatorio => ", total) for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) super_funcion(10, 50, -1, 1.56, 10, 20, 300,*[20,30], nombre="Hector", edad=27,**{'sueldo':1200}) datos_persona ={ 'nombre':'Gonzalo', 'edad': 26 } "Hola {nombre}, tu edad es {edad}".format(**datos_persona) datos_lista =['Gonzalo', 26] "Hola {0}, tu edad es {1}".format(*datos_lista) ###Output _____no_output_____ ###Markdown Paso por valor y referencia----------------------------------- 
Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia (Mutables): Listas, diccionarios, conjuntos... Paso por valor ###Code # Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato def bar(x): x = x + 90 #print('valor de x dentro de la funcion "bar" es ', x) return x x = 3 x = bar(x) print('valor de x a nivel global es ', x) x = bar(x) print(x) ###Output 183 ###Markdown ###Code # Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos def bar(x): return x + 90 my_var = 3 my_var = bar(my_var) print(my_var) my_var ###Output _____no_output_____ ###Markdown Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera: ###Code # Valor puede def foo(x : list): x[0] = x[0] * 99 # lista original my_list = [1, 2, 3] foo(my_list) my_list ###Output _____no_output_____ ###Markdown ###Code # asi se genere una copia simple, esto no soluciona el problema my_list2 = my_list foo(my_list2) my_list2 my_list # se puede solucionar realizando una copia al objeto my_list = [1, 2, 3] my_list2 = my_list.copy() foo(my_list2) my_list my_list2 # Valor puede def foo2(x : list): x[0] = x[0] * 99 return x # lista original my_list = [1, 2, 3] lista_y = foo2(my_list.copy()) my_list lista_y ###Output _____no_output_____ ###Markdown รmbito de variables en funciones----------------------------------- Ejemplo ###Code # valor de variable global 'x' se mantiene x = 7 def foo(): x = 42 
def sumaRecursiva(n):
    """Return n + (n-1) + ... + 1, computed recursively.

    Base case is n == 1; for larger n, peel off n and recurse on the rest.
    (Values below 1 never reach the base case, as in the original.)
    """
    return 1 if n == 1 else n + sumaRecursiva(n - 1)
def area_rectangulo(base, altura):
    """Return the area of a rectangle with the given base and height.

    Bug fix: the original returned ``base * altura / 2``, which is the
    area of a TRIANGLE; the exercise asks for the rectangle area, so
    15 x 10 must yield 150 (the old code produced 75.0).
    """
    return base * altura
def suma(a, b):
    """Return the sum of the two received values."""
    resultado = a + b
    return resultado
def calcular(importe, descuento):
    """Apply a percentage discount to an amount and return the final price."""
    rebaja = importe * descuento / 100
    return importe - rebaja
def foo(x):
    """Multiply the first element of `x` by 99, mutating the list in place.

    Returns None; the caller observes the change through the shared object.
    """
    x[0] *= 99
def validar_par(num):
    """Return True if `num` is even, False otherwise."""
    # The comparison already produces a bool; the original
    # if/return True/return False ladder is redundant.
    return num % 2 == 0
def area_rectangulo(b, h):
    """Return the area of a rectangle with base `b` and height `h`.

    Bug fix: the original returned ``b * h / 2`` (a triangle's area),
    but the exercise asks for the rectangle area — 15 x 10 must be 150.
    This also matches the correct version used later in the notebook.
    """
    return b * h
def serie_fibo(n):
    """Return the Fibonacci terms F(0)..F(n) as a list.

    Starts from the seed [0, 1] and extends pairwise, so n <= 1
    still yields [0, 1] exactly as the original did.
    """
    serie = [0, 1]
    anterior, actual = 0, 1
    for _ in range(2, n + 1):
        anterior, actual = actual, anterior + actual
        serie.append(actual)
    return serie
def bar(x=2):
    """Return the argument increased by 90 (defaults to 2, giving 92)."""
    return x + 90
En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores ###Code def indeterminados_posicion(*args): for arg in args: print(arg) indeterminados_posicion(5,"Hola",[1,2,3,4,5]) ###Output 5 Hola [1, 2, 3, 4, 5] ###Markdown Cuando se tiene los valores en lista ###Code # valores a ser sumados se encuentran en una lista def sumar(a,b): return a+b # lista con valores a ser sumados numeros_sumar=[23,11] print(sumar(*numeros_sumar)) ###Output 34 ###Markdown kwargs Cuando no se sabe la cantidad de valores ###Code def indeterminados_nombre(**kwargs): for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5]) ###Output n => 5 c => Hola l => [1, 2, 3, 4, 5] ###Markdown Valores contenidos en diccionario ###Code def calcular(importe, descuento): return importe - (importe * descuento / 100) datos = { "descuento": 10, "importe": 1500 } print(calcular(**datos)) ###Output 1350.0 ###Markdown Combinando ambos conceptos ###Code def super_funcion(*args,**kwargs): total = 0 for arg in args: total += arg print("sumatorio => ", total) for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) super_funcion(10, 50, -1, 1.56, 10, 20, 300, nombre="Hector", edad=27) datos_persona ={ 'nombre':'Gonzalo', 'edad': 26 } "Hola {nombre}, tu edad es {edad}".format(**datos_persona) ###Output _____no_output_____ ###Markdown Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia 
(Mutables): Listas, diccionarios, conjuntos... Paso por valor ###Code # Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato def bar(x): x = x + 90 x = 3 bar(x) print(x) ###Output 3 ###Markdown ###Code # Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos def bar(x): return x + 90 my_var = 3 my_var = bar(my_var) print(my_var) ###Output 93 ###Markdown Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera: ###Code # Valor puede def foo(x): x[0] = x[0] * 99 # lista original my_list = [1, 2, 3] foo(my_list) my_list ###Output _____no_output_____ ###Markdown ###Code # asi se genere una copia simple, esto no soluciona el problema my_list2 = my_list foo(my_list2) my_list2 my_list # se puede solucionar realizando una copia al objeto foo(my_list.copy()) my_list ###Output _____no_output_____ ###Markdown รmbito de variables en funciones----------------------------------- Ejemplo ###Code # valor de variable global 'x' se mantiene x = 7 def foo(): x = 42 print(x) # llamo a la funcion foo() print(x) # Global indica que se va a trabar con variable global por lo que cuando redefinimos a la variable, # se cambia el valor global de esta x = 7 def foo(): global x x = 42 print(x) # llamo a la funcion foo() print(x) ###Output 42 42 ###Markdown Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo. ###Code def jugar(intento=1): respuesta = input("ยฟDe quรฉ color es una naranja? 
") if respuesta.lower() != "naranja": if intento < 3: print("\nFallaste! Intรฉntalo de nuevo") intento += 1 jugar(intento) # Llamada recursiva else: print("\nPerdiste!") else: print("\nGanaste!") jugar() ###Output ยฟDe quรฉ color es una naranja? asdasd ###Markdown Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar. ###Code def main(): print("PARES E IMPARES") numero_1 = int(input("Escriba un nรบmero entero: ")) numero_2 = int(input(f"Escriba un nรบmero entero mayor o igual que {numero_1}: ")) if numero_2 < numero_1: print(f"ยกLe he pedido un nรบmero entero mayor o igual que {numero_1}!") else: for i in range(numero_1, numero_2 + 1): if i % 2 == 0: print(f"El nรบmero {i} es par.") else: print(f"El nรบmero {i} es impar.") if __name__ == "__main__": main() ###Output _____no_output_____ ###Markdown 2.Realiza una funciรณn llamada area_rectangulo(base, altura) que devuelva el รกrea del rectangulo a partir de una base y una altura. Calcula el รกrea de un rectรกngulo de 15 de base y 10 de altura: ###Code def area_rectangulo(base, altura): return base*altura print( area_rectangulo(15,10) ) ###Output 150 ###Markdown 3.Realiza una funciรณn llamada relacion(a, b) que a partir de dos nรบmeros cumpla lo siguiente:- Si el primer nรบmero es mayor que el segundo, debe devolver 1.- Si el primer nรบmero es menor que el segundo, debe devolver -1.- Si ambos nรบmeros son iguales, debe devolver un 0.Comprueba la relaciรณn entre los nรบmeros: '5 y 10', '10 y 5' y '5 y 5'. ###Code def relacion(a, b): if a > b: return 1 elif a < b: return -1 else: return 0 print( relacion(5, 10) ) print( relacion(10, 5) ) print( relacion(5, 5) ) ###Output _____no_output_____ ###Markdown 4.El factorial de un nรบmero corresponde al producto de todos los nรบmeros desde 1 hasta el propio nรบmero. Es el ejemplo con retorno mรกs utilizado para mostrar la utilidad de este tipo de funciones:- 3! = 1 x 2 x 3 = 6- 5! 
def mi_funcion():
    """Placeholder stub: raises until a real implementation is provided."""
    # Bug fix: the original ended with `return NoImplementError`, which
    # is an undefined name (a typo for the builtin NotImplementedError)
    # and crashed with NameError when called. A not-yet-implemented stub
    # should *raise* NotImplementedError instead of returning it.
    raise NotImplementedError
Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder: ###Code # Ejemplo funciรณn con parรกmetros def mi_funcion(nombre): print(f"Hola {nombre}") mi_funcion('gonzalo') mi_funcion() # Ejemplo 2 def suma(numero1, numero2): # valores que se reciben return numero1 + numero2 a = 5 b = 6 resultado = suma(a, b) # valores que se envรญan print(resultado) ###Output 11 ###Markdown Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn ###Code # Ejemplo3 def resta(a, b): return a - b # argumento 30 => posiciรณn 0 => parรกmetro a # argumento 10 => posiciรณn 1 => parรกmetro b resta(30, 10) ###Output _____no_output_____ ###Markdown Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente: ###Code print(resta(100,10)) resta(b=100, a=10) ###Output 90 ###Markdown Valores por Defecto Es posible colocar valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando ###Code def bar(x=2): return x + 90 # my_var = 3 print(bar()) # pasando un valor a mi funcion print(bar(6)) ###Output 96 ###Markdown Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. 
En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores ###Code def indeterminados_posicion(*args): for arg in args: print(arg) indeterminados_posicion(5,"Hola",[1,2,3,4,5],{'dia':'sabado'}) ###Output _____no_output_____ ###Markdown Cuando se tiene los valores en lista ###Code # valores a ser sumados se encuentran en una lista def sumar(a,b): return a+b # lista con valores a ser sumados numeros_sumar=[23,11] print(sumar(*numeros_sumar)) sumar(numeros_sumar[0],numeros_sumar[1]) ###Output _____no_output_____ ###Markdown kwargs Cuando no se sabe la cantidad de valores ###Code def indeterminados_nombre(**kwargs): for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5]) ###Output _____no_output_____ ###Markdown Valores contenidos en diccionario ###Code def calcular(importe, descuento): return importe * (1 - descuento / 100) datos = { "descuento": 10, "importe": 1500 } print(calcular(**datos)) calcular(datos['importe'], datos['descuento']) ###Output _____no_output_____ ###Markdown Combinando ambos conceptos ###Code def super_funcion(*args,**kwargs): total = 0 for arg in args: total += arg print("sumatorio => ", total) for kwarg in kwargs: print(kwarg, "=>", kwargs[kwarg]) super_funcion(10, 50, -1, 1.56, 10, 20, 300,*[20,30], nombre="Hector", edad=27,**{'sueldo':1200}) datos_persona ={ 'nombre':'Gonzalo', 'edad': 26 } "Hola {nombre}, tu edad es {edad}".format(**datos_persona) datos_lista =['Gonzalo', 26] "Hola {0}, tu edad es {1}".format(*datos_lista) ###Output _____no_output_____ ###Markdown Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados 
dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia (Mutables): Listas, diccionarios, conjuntos... Paso por valor ###Code # Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato def bar(x): x = x + 90 #print('valor de x dentro de la funcion "bar" es ', x) return x x = 3 bar(x) print('valor de x a nivel global es ', x) x = bar(x) print(x) ###Output _____no_output_____ ###Markdown ###Code # Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos def bar(x): return x + 90 my_var = 3 my_var = bar(my_var) print(my_var) my_var ###Output _____no_output_____ ###Markdown Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera: ###Code # Valor puede def foo(x : list): x[0] = x[0] * 99 # lista original my_list = [1, 2, 3] foo(my_list) my_list ###Output _____no_output_____ ###Markdown ###Code # asi se genere una copia simple, esto no soluciona el problema my_list2 = my_list foo(my_list2) my_list2 my_list # se puede solucionar realizando una copia al objeto my_list = [1, 2, 3] my_list2 = my_list.copy() foo(my_list2) my_list my_list2 ###Output _____no_output_____ ###Markdown รmbito de variables en funciones----------------------------------- Ejemplo ###Code # valor de variable global 'x' se mantiene x = 7 def foo(): x = 42 print(x) # llamo a la funcion foo() print(x) # Global indica que se va a trabar con variable global por lo que cuando redefinimos a la variable, # se cambia el valor global de esta def foo(): global x x = 42 # reasignacion total x print('valor de x final', x) return x # llamo a la funcion x = 7 foo() print(x) ###Output _____no_output_____ ###Markdown Funciรณn 
Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo. ###Code def jugar(intento : int =1 ): respuesta = input("ยฟDe quรฉ color es una naranja? ") if respuesta.lower() != "naranja": if intento < 3: print("\nFallaste! Intรฉntalo de nuevo") intento += 1 jugar(intento) # Llamada recursiva else: print("\nPerdiste!") else: print("\nGanaste!") jugar() ###Output _____no_output_____ ###Markdown Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar. 2.Realiza una funciรณn llamada area_rectangulo(base, altura) que devuelva el รกrea del rectangulo a partir de una base y una altura. Calcula el รกrea de un rectรกngulo de 15 de base y 10 de altura: 3.Realiza una funciรณn llamada relacion(a, b) que a partir de dos nรบmeros cumpla lo siguiente:- Si el primer nรบmero es mayor que el segundo, debe devolver 1.- Si el primer nรบmero es menor que el segundo, debe devolver -1.- Si ambos nรบmeros son iguales, debe devolver un 0.Comprueba la relaciรณn entre los nรบmeros: '5 y 10', '10 y 5' y '5 y 5'. 4.El factorial de un nรบmero corresponde al producto de todos los nรบmeros desde 1 hasta el propio nรบmero. Es el ejemplo con retorno mรกs utilizado para mostrar la utilidad de este tipo de funciones:- 3! = 1 x 2 x 3 = 6- 5! = 1 x 2 x 3 x 4 x 5 = 120 5.Escribir una funciรณn que, dado un nรบmero de DNI, retorne True si el nรบmero es vรกlido y False si no lo es. Para que un nรบmero de DNI sea vรกlido debe tener entre 7 y 8 dรญgitos. 
def is_valido_dni(dni):
    """Return True when `dni` has a valid DNI length (7 or 8 characters).

    NOTE(review): as in the original, only the length is checked — the
    characters are never verified to be digits; confirm whether that is
    intended before relying on this for real validation.
    """
    # Chained comparison replaces the and-combined if/else ladder.
    return 7 <= len(dni) <= 8
lecture-06/lab.ipynb
class Var:
    """
    A scalar variable supporting reverse-mode automatic differentiation.

    Each Var stores a value `v`, a list of `(parent, local_grad)` pairs
    recording how it was produced, and an accumulated gradient `grad`.
    Calling `backward()` on a result seeds the chain rule with 1.0 and
    propagates gradients back to every Var that contributed to it.
    """

    def __init__(self, val: Union[float, int], parents=None):
        # Exact type check (not isinstance): deliberately rejects bools
        # and numpy scalars, accepting only plain float/int.
        assert type(val) in {float, int}
        if parents is None:
            parents = []
        self.v = val
        self.parents = parents  # [(Var, d(self)/d(parent)), ...]
        self.grad = 0.0

    def backprop(self, bp):
        """Accumulate `bp` into this node's gradient and recurse to parents.

        NOTE: gradients accumulate across calls — running backward()
        twice doubles every .grad. On graphs with heavily shared
        sub-expressions this naive recursion revisits nodes repeatedly.
        """
        self.grad += bp
        for parent, local_grad in self.parents:
            parent.backprop(local_grad * bp)

    def backward(self):
        """Start backpropagation from this node with seed gradient 1.0."""
        self.backprop(1.0)

    def __add__(self: 'Var', other: 'Var') -> 'Var':
        # d(a+b)/da = d(a+b)/db = 1
        return Var(self.v + other.v, [(self, 1.0), (other, 1.0)])

    def __mul__(self: 'Var', other: 'Var') -> 'Var':
        # d(a*b)/da = b, d(a*b)/db = a
        return Var(self.v * other.v, [(self, other.v), (other, self.v)])

    def __pow__(self, power: Union[float, int]) -> 'Var':
        assert type(power) in {float, int}, "power must be float or int"
        # d(a**p)/da = p * a**(p-1); power is a plain number, not a Var.
        return Var(self.v ** power, [(self, power * self.v ** (power - 1))])

    def __neg__(self: 'Var') -> 'Var':
        return Var(-1.0) * self

    def __sub__(self: 'Var', other: 'Var') -> 'Var':
        return self + (-other)

    def __truediv__(self: 'Var', other: 'Var') -> 'Var':
        return self * other ** -1

    def tanh(self) -> 'Var':
        # Perf: the original evaluated math.tanh(self.v) twice;
        # compute it once and reuse it for both value and derivative.
        t = tanh(self.v)
        return Var(t, [(self, 1 - t ** 2)])

    def relu(self) -> 'Var':
        # Derivative is 1 on the positive side, 0 elsewhere (incl. at 0).
        return Var(self.v if self.v > 0.0 else 0.0, [(self, 1.0 if self.v > 0.0 else 0.0)])

    def __repr__(self):
        return "Var(v=%.4f, grad=%.4f)" % (self.v, self.grad)
# ###Markdown (condensed): Exercises b)/c) — tracing backprop by hand, and
# observing that calling backward() twice ACCUMULATES gradients (each
# .grad doubles, because backprop adds into .grad rather than resetting).
# Cross-cell demo (needs the Vars a..f defined in the previous cell):
#   f.backward()
#   for v in [a, b, c, d, e, f]:
#       print(v)

try:
    from IPython.display import Image  # notebook-only display helper
except ImportError:  # keep the module importable outside Jupyter
    Image = None


def finite_difference(fn, x_val, dx=1e-10):
    """Numerically approximate d fn(x) / dx at ``x_val``.

    Uses the one-sided (forward) difference quotient
    ``(fn(x_val + dx) - fn(x_val)) / dx`` with a small step ``dx``.
    Used in exercise d) to cross-check the autodiff gradients.
    """
    return (fn(x_val + dx) - fn(x_val)) / dx
# ###Code (reconstructed): exercise-d test functions and the toy dataset.
from math import sin
import random

# Notebook-only third-party imports, disabled so the module loads headlessly:
#   import tqdm as tqdm
#   import matplotlib.pyplot as plt


def f(a, b):
    """Test function f(a, b) = a*b + b."""
    return a * b + b


def f_b(b):
    """f with a fixed at 3, as a function of b only."""
    return 3 * b + b


def f_a(a):
    """f with b fixed at 5, as a function of a only."""
    return a * 5 + 5


# Cross-cell demo: compare numerical and autodiff gradients (needs
# finite_difference and Var from the previous cells):
#   finite_difference(f_b, 3) + finite_difference(f_a, 5)
#   a = Var(3); b = Var(5); c = a * b + b; c.backward()
#   print(a); print(b); print(c)   # grads: a=5.0, b=4.0, c=1.0


def sample_data(noise=0.3):
    """Draw one (x, y) pair from y = sin(x) + x plus Gaussian noise.

    x is uniform on (-5, 5); ``noise`` is the standard deviation of the
    additive Gaussian noise on y.
    """
    x = (random.random() - 0.5) * 10
    return x, sin(x) + x + random.gauss(0, noise)


train_data = [sample_data() for _ in range(100)]
val_data = [sample_data() for _ in range(100)]

# Plotting needs matplotlib; kept as a comment to avoid an import-time
# side effect in non-notebook environments:
#   for x, y in train_data:
#       plt.plot(x, y, 'b.')
#   plt.show()
The power of neural networks comes from non-linear activation functions.3. **Parameter initialization**. We will initialize the weights to have random values. This is done in practice by drawing pseudo random numbers from a Gaussian or uniform distribution. It turns out that for deeper models we have to be careful about how we scale the random numbers. This will be the topic of a later exercice. For now we will just use simple Gaussians. See the `Initializer` class below.Note that we use Sequence in the code below. A Sequence is an ordered list. This means the order we insert and access items are the same. ![f2.jpeg](data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEAYABgAAD/4RDaRXhpZgAATU0AKgAAAAgABAE7AAIAAAAFAAAISodpAAQAAAABAAAIUJydAAEAAAAKAAAQyOocAAcAAAgMAAAAPgAAAAAc6gAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAERUUDMAAAAFkAMAAgAAABQAABCekAQAAgAAABQAABCykpEAAgAAAAMxMQAAkpIAAgAAAAMxMQAA6hwABwAACAwAAAiSAAAAABzqAAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMjAxMjowODoyNCAxNDozODo0MAAyMDEyOjA4OjI0IDE0OjM4OjQwAAAARABUAFAAMwAAAP/hCxdodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvADw/eHBhY2tldCBiZWdpbj0n77u/JyBpZD0nVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkJz8+DQo8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIj48cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPjxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSJ1dWlkOmZhZjViZGQ1LWJhM2QtMTFkYS1hZDMxLWQzM2Q3NTE4MmYxYiIgeG1sbnM6ZGM9Imh0dHA6Ly9wdXJsLm9yZy9kYy9lbGVtZW50cy8xLjEvIi8+PHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9InV1aWQ6ZmFmNWJkZDUtYmEzZC0xMWRhLWFkMzEtZDMzZDc1MTgyZjFiIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iPjx4bXA6Q3JlYXRlRGF0ZT4yMDEyLTA4LTI0VDE0OjM4OjQwLjExNDwveG1wOkNyZWF0ZURhdGU+PC9yZGY6RGVzY3JpcHRpb24+PHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9InV1aWQ6ZmFmNWJkZDUtYmEzZC0xMWRhLWFkMzEtZDMzZDc1MTgyZjFiIiB4bWxuczpkYz0iaHR0cDovL3B1cmwub3JnL2RjL2VsZW1lbnRzLzEuMS8iPjxkYzpjcmVhdG9yPjxyZGY6U2VxIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+PHJkZjpsaT5EVFAzPC9yZGY6bGk+PC9yZGY6U2VxPg0KCQkJPC9kYzpjcmVhdG9yPjwvcmRmOkRlc2NyaXB0aW9uPjwvcmRmOlJERj48L3g6eG1wbWV0YT4NCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAK
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgPD94cGFja2V0IGVuZD0ndyc/Pv/bAEMABwUFBgUEBwYFBggHBwgKEQsKCQkKFQ8QDBEYFRoZGBUYFxseJyEbHSUdFxgiLiIlKCkrLCsaIC8zLyoyJyorKv/bAEMBBwgICgkKFAsLFCocGBwqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKv/AABEIAmgEnwMBIgACEQEDEQH/xAAfAAABBQEBAQEBAQAAAAAAAAAAAQIDBAUGBwgJCgv/xAC1EAACAQMDAgQDBQUEBAAAAX0BAgMABBEFEiExQQYTUWEHInEUMoGRoQgjQrHBFVLR8CQzYnKCCQoWFxgZGiUmJygpKjQ1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4eLj5OXm5+jp6vHy8/T19vf4+fr/xAAfAQADAQEBAQEBAQEBAAAAAAAAAQIDBAUGBwgJCgv/xAC1EQACAQIEBAMEBwUEBAABAncAAQIDEQQFITEGEkFRB2FxEyIygQgUQpGhscEJIzNS8BVictEKFiQ04SXxFxgZGiYnKCkqNTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqCg4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2dri4+Tl5ufo6ery8/T19vf4+fr/2gAMAwEAAhEDEQA/APpGiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAopsis0bKjbWI4PpXiGoeKfF8fxlh8JQayBbyjfv2dB6UAe40VHbpJHbok0nmSAYZ/U1JQAUUUUAFFFFABRRVe/voNOspLq6kCRoMkk4oAsUVBY3aX9lFdQ52SruXPpU9ABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUVxnxO1DVtD8GX+r6Re+RJbJvClc5oA7OivL/gxr3iDxf4ZTW9a1ISqzFfJC46Vp/FbUNc0Dwlea3o2pfZzbKD5RXO6gDvaMgnAPIrhfhZqWs+IPBdtq+sX/nyXKnChcbTVnwl4e8QaT4j1e71vVTe2ty2beP8A55igDsaCQBkkAeporxX44a94i8PajpI03UzHZ31wqPCB7+tAHtVFQWTFtPt2Y5JiUn8hXL/EyXV7TwVe3uhah9int4y27bnPFAHXgg9DmivO/ghrOoa58N7e81acz3BkYM57816JQAUUUUAFFFFA
BRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFcn4+8eWfgjS0lkUT3k7bILcHlzQB1lFcFoum+MdVsRqF7rRsjcrvS2EefKB6Vk6n4s1z4a6nanxZdf2lpF5II/tm3BhP0oA9ToqG0uob60iubVxJDKodGHcGpqACiis2DXbO48QT6RE4a4gjEjgHOAaANKiiigAooooAKKKoWms2l7qdxZWziSS3GXK8ge1AF+iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK8C1D/AJOos/8Arka99rwLUP8Ak6mzHfyjQB3nxe8Saz4W8NxahozhAJQkjEZ61jand+PtV8FprOlXq2LwQGUpjPnADNXvjz/yTc+n2qP+ddBZZ/4VT/3Dm/8AQTQBieEvG+qeI/hVLqqRqdSt4mEnPVhnn9K5HwP4v8e+OtE1BLGZY5IpSouyPuH+7im/Ax3b4X+JdzEhXlA9uGrT/Zz/AORa1bHT7Yf60AQfD/4keIrbx1ceDfHoxeEEwXGMbvStMeJvEWlfHC38N3mofarC6hMqrtxt9qoeNNOTUfj7oJ06MPcwwlpyvYA96NeYD9p3SCxCj7HjJ4oA1fi34i1/wlPpeoaVqG23uLpYZLYr1BrJ+PFvrE/hCwvrTVmtrdpYw9uo+8T3zUv7QdxD/ZehxeYu/wDtBDjPTmrXxsdT8L9PYMpX7RDznjpQB1Xw/wBL1ax0G0fUtXN7G8IKJtxtrr6x/C0iHwxpyh1LGBTgGtigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK4j4w8/CrWf+uNdvXEfGH/AJJTrX/XGgDzv4D+ONF0P4cxWd886zLIxOyIsPzrX+LfxA0LVPhnqdpayXBlkUBQ0JAzn1qb9nO2gl+FcLSQRMfNb5mQEmtv402tvH8J9XZLeJSEHIQcc0AN+ELzR/BrT3thmURMVHvVL4UeK9e8QeI/EVrr04dLOQCFAPuDNaPwa/5JBpv/AFyaub+EAJ8ZeMcdfMxQB0eqeJ9T1/xo/hrwzcfZvsnN1dgZ2+1eXfG/TNb07V/D7arqp1CF7ldilcbDmt74Ua7DY/FzxPo+onyrq4kDRFzgtij9oxlF54ZBYA/ahxn3oA7zxl41/wCEW0LTrWyTztTvkSO3j/AAn8K5n4h6J4th+Hd7dvr5nAgLSW5TG4EdM+1Y3xSvhpHxG8D6pcgNZpGqliflBNek/EeeOX4ZalMki+W9uWDA8HIoA5D4H6lBo/wUjvrxgkcTOST3PpWt5/iDxRpR1jTfEUelLIrFLXg4x0/OuB8MW897+y3cRWKmWZZGfYp5IBrqPhMfCvinwTbMSBeQDZcRNLgqw9qALHw5+ImqeJdF1uw1CPGraUGxJjAlx3rK+HvjHxl41tNWtBKqSRylEusf6nnpivQbOw8PWa6nHoccYuY4H85oznt3Neffs8Zx4i/6+z/M0AXPAnjPxDa/Ee98HeKboX0y5aKcDGBXr9eB2mf+GsZ/+uTfyr3ygAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACvnTx5cf2z+03odhcgmC22kITwT64r6Lr59+LOnT+GfjJofjEW5NhuVJ5B2PvQB9BAADAGAK86+OdjBefCvUmuEDGFd6H0Nd9ZXkN/ZQ3Vs4eKZA6sDnINeb/HjWktPAMulQqJrzUm8mKJTlvrigDk/AWta837PL6tZ6mYZ9PDbcjO5VIGP1r0fwPquqeKfhzDdz3flX06cTgfdPrXH6X4Xn
8Jfs231heDbO9qZXHoSQa6X4Pyxp8MLF2kQKF5JPAoAx/ht4j8Q+Jo9e03UNSzcWrtFDchfukHGa4b4V6X4hv/H/AIhjTxC8dzA+JJiuTIM9K6r4JOja/wCJcOpzdORz15rP+DMiJ8TfFhd1UCQ/eOO9AF/xn448X+H/AIhaTpVqBJbuQrJj/XmqnjXXfiR4OlHiCWcTaTvBltgv+rUnpVzx8Qfjh4WOQV8wYPbtXc/FJoh8M9Z8/btMBGG7nNAHKeM/G2pXPwuh8Y+FNS8hFVTJFtzuJ6iuttL3Utb+G1tqFvefZb2W180y4zzjNeSvp9zpv7Kk0V1EY2Zg6r/sk8V6n4Xnhi+Edk7yoFGn9d3+zQBz3w18Qa/428A6j9q1Hyb+Kd4UugvTHfFcj8F9J8QXOqavIPELjyrlllBXJk5roP2fpEbwbq211JN7KcZ571H8DJI1vPERaRRi7fOT05oA9kQFUUMckDk+tLQCCMg5BooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAGyqzxMqNsYjAb0ry+7+D1xdeNF8THX5Vv1Pyts6D0r1KigDivG3gO68Z6LBptxq7wwoQ0mF++w71bg8J30Hg1tCXV3JMflibbyFxjFdVRQB5x4a+Hf/AAr/AMK6xb2+oPcwzwyOyMuPmwea4b4F6Nqt5omrzaZq7WUZuiDGFzzzzXt+u6bNq2kTWVvdG1MylTIBng1y/wAPfh0/gKO4hh1R7mGd97Iy45oA0vDXguHRL6bUr24N9qcxO65cc49BWb47+G0Xi+9ttRs799N1K24S5jGTiu5ooA8w174Nx+I9DtrXVNXmlvIJBIbrHLY9q6LXvAFl4g8Cp4bu5m2RqNs3cMO9dbRQByvgvwa3hXTo4Lq/k1CaIbI5X42r6YrqqKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArmfGvhSfxdos2ljUGtbeddsgVc5FdNRQBwvw/+Hlx4DtRZW+rvcWKnIhZcc1c8deC7rxnpr6d/ar2lnKMSRqud1ddRQBxnhPwRe+FfDh0eHWHlgVSsRK/czVLwd8NLjwj4gu9Sh1mSZbxt08TL96vQKKAPO/GnwksfE2uwa7pl2+latEcm5iH3vTNZ2t/Bu58TrZP4h8Qy3lxaEESFMZxXqtFAHH+JfhzpfinwjDoupEsbdAIZ/wCJCO9c7D8JdVfw4+g6j4pnutMK7ViZeVH1r1KigDkfAfgC18EaA+kxTtdQvnO8dj2rjL/4BW6+ILjUfDmtT6Slw254Y+h9a9hooA5XSvBEOi+G59P066dLq4TbNdtyzH1rK8BfDSXwNc3b2+rvcR3TF3Rlx83rXf0UAebR/CiaP4iN4tGtSfa2OCuzjHpXpCghQCckDk0tFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFUNZ0Ww1/TZLDVbdZ7eQYKsKv0UAcBaeANd0oG30bxRNa2Kn91AUzsHpmrel/DyFdaTWPEV22r6hF/qpJBgJ+FdpRQBU1PTbfVtLn0+8TdBOhR19q8+8M/Ca48PXMkH9vzT6QzEiyIwAD2zXplFAHBeCPhfb+C9cv8AUIL6Sdbp2ZY2HCA9qrSfCO1j8Zza3p9/Jaw3JzcWyDiT8a9GooA8O+Jti918WPDVhaTG2diFjlHJWu5uvAV9rEsMXiDW5L6wRtz2xXAf61U1z4YXGteMLbxA+tyRz2rboVC8LXfW0ckVuqTSmVwOXPegDO1bw5Yav4ak0SaILaPGIwoH3QOlcV4f+E1xpEUtlea/PeaawIitiMCMV6XRQBw3w9+Glt4C+2rb
3j3KXTlgrcBM9qo6T8I4NG8XXWqWWpypZ3bF5rMdGP1r0eigBsaLHGqIMKowKdRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFctrXxF8P6BrMOlajcPHdzOEjTZ94n3rqa8J+OEUZ+InhN9g3CUc4/wBqgD3UHKgjvS1UutQtdNsPtF7MsUarkljjt2rAtfiP4dur+O0FzJDJKcIZo9gb6GgDqqKxta8V6T4edF1W48gSDKsRx+dXNO1a01Wx+12Tl4cZDY60AXaKxrDxVpepao+n2kzPcR/fXb9361HrvjDSfDuBqEkjHGSIU3lfrjpQBu0VlaH4m0rxHY/a9Ku0ljH3gTgr9R2qu/jTQI5LlDqMRNqCZSGBAxQBu0Vk+HvE2leKLFrvRbkXEKttYjsa1qACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAr317Dp1lJdXJIijGWKjJxWL4X8c6J4vmuo9DuDMbU4kJXGDW3egNYThgCPLbg/SvFf2fY1j1jxQEAA+0n+dAHuNFYes+MNH0OZYbydmlPVIV3kfUDpUmieKdJ8Q2kk+l3IlEX30xhl+ooA2KK5y38eaBdah9hhuy11nb5W35vyrT1bW7HRLQXOoyGOI/xYoA0KKoW2tWN1pQ1FJdtsRkO/FYUHxL8Nzaklk11JBK7bVM0exWPsTQB1lFUdS1nT9IsvteoXUcMPGGZhz9Kx7v4heGrKa0iuNRRXuziIev1oA6aisDVPGujaRciC5lkdyAf3Sb8A+tX9I1yw1yBpdOl8xV4PGMUAaFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXKzfEbw/B4mh0CS4cahM21YimK6qvCPG8Ua/tJaC6oAxReQPpQB7vRVLVNXstGtGuNQnWJB0GeW+g71j6Z8QNA1XUlsILl47h/uJMmzd9M0AdLRWFq/jLRdCvBbapdeRIRkbhwa0rfUre50/7bCWMONwOOSKALdFZGk+J9M1u4mh06YyND9/5cAVQ1j4gaDod00F7PISv3njj3Kv1NAHTUVQstb07UdLGo2l3FJakZ8wMMD61lTeP/DkOlSai+op9mjfYzj1oA6Siqmmanaaxp8d7p0yzQSjKup61boAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArwz43/APJQfCf/AF1H/oVe514N8bLlpviF4c+z2lzMLWUGVo4yQOaAPQfiH4Vh8R2FjNeaqbC2s3WV13YEmOcVwnxhvrLV/B9ldaTZlIrS5QLc7djKe2Pap/jbNq0p8P31nFcS6KjrJdRRqdx57j6VW+KvilPEPw1hPhnTJ5bNJ4zMTCVZMeg70Adrr3hqPxx8J4oL4lrkWvmJIOu4Dis34SeLRL4CubO+jEV1ogaN4zwWAya6n4e6k2peD7RjbvCscYQCRcE8eleX+LPCepaX8YrUeH5CLfVj5l1EOBt70Adn4YtnsPDWveKQgSe/V7mI45UAH+tZHw91HxBf+Hn1L+x7e/a+kLSSyPnPJ4xXplxpEL+HZdKgAjieBogB2yMV4D4U8Xaz8HtcvtA8R6bc3GkmUtDcRoWwM0AdV4F+H/iHRPiZqeqXcCW+jaijbrZJMhWPoKw/AvhLTb74u+J7KdWfT0kbFszHGfWvSPCvjC+8V3zapb2slvokUZ/1i4Z2x1xXBfD3V0i+M+vvLaXSQ3kpMMjREA/jQB6/4e8L6R4WtZbfQ7RbWKV97qp6mteiigAooooAKKKK
ACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAIbz/jxn/wCubfyrxX4AjOseKQDgm4PP417LqlwlrpVzNIGKrG2QoyeleJfAqaaHWPEsUlrcQS3EjNCZIyAeaAOt07SNL8F+LdTvtQ1BtSvNTICQ7d5i9sdq5fwQZbL486zDFF9kjuV3PAD8v5VS8A+IJPDPj/XU8a211JfXUuLeURF1IzwB6UaXrOoW/wAf7+6vdKmjM6qsJVCVK+pNAGj8RdNTwJ8SdK8ZWMBeGeTy7pP4Vz3rsfHVyniGz0vQrUK/9qFZQ4Odqjmt3xr4fh8UeD7zT7j5N8e9WxypAzXAfBDStTks57rXSZltZDHZuxzgA4oAl8d317beM/DHhnTLdJ7ZkzLbltocr0pvxD8LeIfGvhwWUfh+2trmJw0E6SAFMe9Hxu8Pay0mmeKvDMTSXuktuZV6suelGifGxtcso7GHRbpNbYBWiaMhVPrmgCj8RNAu4/gLEniPLapZ7FEiv905/Wtvwf8ADnQNW8EaZc6raC7vBEGEzMc7u1V/ixLewfCN7K/Sa61CdlbbEhboc4rpPhvqa3vw7tfJililgg2lZU2kMBQB5xp3jaT4d+N9R0vxvprixumBivdm4BR0GfpXq3g6LRZrebU/Dlyk1peEOFQ52n+lcxZeIdE8X2l3o3izTnea3LBmkh+8PY1m/Bfw1c6DrOvPa+dHossubSKXPHPvQB69RRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAV4X44/5OP0D/cX+le6V4F42vd/7QejXaWl09vAFWSRYiVB470Aeg+O/DFpf65pmuapqrW9rp8gc2ueJfbHeuC+LN9Ffaj4c1fTLU2yJdBY7jbsY89CKn+K11qVl8S9B1O9guLjw7FtdkiUnDdyRVX4veJH1zTvD93o+mTyaXHdh2kERDA8cbaAO0+KHhJPFnw9F5gm/s4RPEy9WIwcU7wX46jvvhQdSliVbiwi8mSHvuGB0rrvDV6dV8N20ssBiVowuxxg4x3FeN23hDUdK+NEukafIX0e5/wBIuY+wB56UAdHdrceCfg5qepWmEvr0mZW9N5GBVvQYtauPBNvat4btp4ru3DSO0gJcsOtdV428NL4i8C32iwIA0kO2EehHSvIvAnxW1HwVYjwz410u732bGKC4jjLbwOgoA3vh54C1rw5pWv2XiGMLpkyvJbwrJnacVjfBHwbpmveGNWTWEN1bfbXRIHJwuDXo1hrepahoepa1qkEkFjJAwgtwMt064rjv2f7/AMjTdT026tbiC4a7eVfMjIBUnigD1jRtFsNA01LDSoBBbJ91Ac4q/RRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABTGhic5eNGPqVBp9FADXijkTY6Ky/3SMimC1t1i8tYIhH/dCDH5VLRQBxHxB8dzeBoLc2WjyX/mnlYlPyj8Kg8FLqHifVD4p1q1NowUx2kLDkIeua7x40f76K31GaUAKMKAAOwoAWoZ7O2ul23MEco9HQGpqKAGRwxwxiOKNUQdFUYFAgiVtyxID6hRT6KACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAAgEYIBHoaYsMaHKRop9QoFPooAie1t5JBJJBGzjoxQEih4oU3TeShcDOdozUtFAHkF78Utc1bW7rw7pXh+ZJGby0uGUhSvQmvS/DmjR6DocNjGc7fmY/7R5NaIijDbhGob1CjNPoAQ
gMCGAIPY1BHYWcMxlitYUkPVlQA1YooAa8aSffRW+ozQsaIMIiqPQDFOooAryWNrKQXgjJBznaKnVVQYRQo9AMUtFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUwwxFtxiQn1Kin0UAMkhimTZNGki+jKCKb9mg2BPIj2r0XYMCpaKAPPvHvxIufBepW1nZ6JNfrLjLRqcL+VXvA1lfX0s3iXW4fIvbxdixf3Yx0rsWijc5dFY+4zTgABgDAoAKrzWFpcOHntYZGHIZkBNWKKAE2Ls2bRt9McU1Yo0OUjVT6hQKfRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAGbr2u2vh3SJNRv1laGPqIU3N+VcN/wvbwr/z7at/4BNXpTIrrh1DD0IzUf2W3/wCeEX/fAoA85/4Xt4V/59tW/wDAJqP+F7eFf+fbVv8AwCavRvstv/zwi/74FH2W3/54Rf8AfAoA85/4Xt4V/wCfbVv/AACaj/he3hX/AJ9tW/8AAJq9G+y2/wDzwi/74FH2W3/54Rf98CgDzRv2gfBccwhkOoJKRkRtakMfwpf+GgPBn/UR/wDAU1teOPAVvr8C32mRRW+q24zFIEHzj+6a8/0+/jmeW01K1htL+24mjeMAcdx7VyYivOjqo3RMm0dKf2gfBaqWY6gFHJJtTgU6L4++ELiMSW8epTRno6WhINc1puk3HjrUzY6fDHBpULYubryx8/8Asj1Fex6XoWm6Pp0VlY2cMcMS4UBBWlGpOpHmkrDTbOE/4Xt4V/59tW/8Amo/4Xt4V/59tW/8Amr0b7Lb/wDPCL/vgUfZbf8A54Rf98CtxnnP/C9vCv8Az7at/wCATUf8L28K/wDPtq3/AIBNXo32W3/54Rf98Cj7Lb/88Iv++BQBwul/GTw5q+pQ2Nrb6kJZm2qZLQqM+5rv6jFtApysMYPqFFSUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABVXUr+LS9OmvbgOYoV3MEXJx7CrVIQGGGAIPY0AebH46+FlYg22q8f9ObUn/C9vCv/AD7at/4BNXo32W3/AOeEX/fAo+y2/wDzwi/74FAHnP8Awvbwr/z7at/4BNR/wvbwr/z7at/4BNXo32W3/wCeEX/fAo+y2/8Azwi/74FAHnP/AAvbwr/z7at/4BNR/wAL28K/8+2rf+ATV6N9lt/+eEX/AHwKPstv/wA8Iv8AvgUAec/8L28K/wDPtq3/AIBNR/wvbwr/AM+2rf8AgE1ejfZbf/nhF/3wKPstv/zwi/74FAHnP/C9vCv/AD7at/4BNR/wvbwr/wA+2rf+ATV6N9lt/wDnhF/3wKPstv8A88Iv++BQB5z/AML28K/8+2rf+ATUf8L28K/8+2rf+ATV6N9lt/8AnhF/3wKPstv/AM8Iv++BQB5z/wAL28K/8+2rf+ATUf8AC9vCv/Ptq3/gE1ejfZbf/nhF/wB8Cj7Lb/8APCL/AL4FAHnP/C9vCv8Az7at/wCATUf8L28K/wDPtq3/AIBNXo32W3/54Rf98Cj7Lb/88Iv++BQB5z/wvbwr/wA+2rf+ATUf8L28K/8APtq3/gE1ejfZbf8A54Rf98Cj7Lb/APPCL/vgUAec/wDC9vCv/Ptq3/gE1H/C9vCv/Ptq3/gE1ejfZbf/AJ4Rf98Cj7Lb/wDPCL/vgUAecN8ePCaKWeDVFUckmzbAqJP2g/BMqb4nv3Q/xLakivSZtPs54XimtYXRwVZSg5FeP+J/Csnga+a+0+1SfQ5mzJGIwTbk+lZVZTjG8FcTubP/AA0B4M/6iP8A4CmkT9oHwXLKYojqEkg6otqSR+FcvqOsWdrYRy2cMNxN
cYWCNIwSSa734e+B00WzbUtXgifVLwbpPkHyDsBWGHxE61242Qotsz/+F7eFf+fbVv8AwCaj/he3hX/n21b/AMAmr0b7Lb/88Iv++BR9lt/+eEX/AHwK7Cjzn/he3hX/AJ9tW/8AAJqP+F7eFf8An21b/wAAmr0b7Lb/APPCL/vgUfZbf/nhF/3wKAPOf+F7eFf+fbVv/AJqP+F7eFf+fbVv/AJq9G+y2/8Azwi/74FH2W3/AOeEX/fAoA85/wCF7eFf+fbVv/AJqP8Ahe3hX/n21b/wCavRvstv/wA8Iv8AvgUfZbf/AJ4Rf98CgDzn/he3hX/n21b/AMAmo/4Xt4V/59tW/wDAJq9G+y2//PCL/vgUfZbf/nhF/wB8CgDzn/he3hX/AJ9tW/8AAJqP+F7eFf8An21b/wAAmr0b7Lb/APPCL/vgUfZbf/nhF/3wKAPOf+F7eFf+fbVv/AJqP+F7eFf+fbVv/AJq9G+y2/8Azwi/74FH2W3/AOeEX/fAoA85/wCF7eFf+fbVv/AJqP8Ahe3hX/n21b/wCavRvstv/wA8Iv8AvgUfZbf/AJ4Rf98CgDzn/he3hX/n21b/AMAmo/4Xt4V/59tW/wDAJq9G+y2//PCL/vgUfZbf/nhF/wB8CgDzn/he3hX/AJ9tW/8AAJqP+F7eFf8An21b/wAAmr0b7Lb/APPCL/vgUfZbf/nhF/3wKAOW8K/EjRvF9+9ppcN6kiruJuLcoMfU111MSGKM5jjRT6qoFPoAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDk/FXxG0bwfeR22qRXru67gbeAuPzFYH/C9vCv/Ptq3/gE1ekPDFIcyRo5/wBpQab9lt/+eEX/AHwKAPOf+F7eFf8An21b/wAAmo/4Xt4V/wCfbVv/AACavRvstv8A88Iv++BR9lt/+eEX/fAoA85/4Xt4V/59tW/8Amo/4Xt4V/59tW/8Amr0b7Lb/wDPCL/vgUfZbf8A54Rf98CgDzn/AIXt4V/59tW/8Amo/wCF7eFf+fbVv/AJq9G+y2//ADwi/wC+BR9lt/8AnhF/3wKAPNP+GgvBQlMJa/Eq8mM2p3D8KX/hoDwZ/wBRH/wFNXvHXgFb2T+2/D0EMepxDLpsGJ1HY1xmn6nZ3lvI1xBDbTwcTxSRgFCOv4Vx4jETo68t0TJtHSN+0H4JjQvI1+iDqzWpAFPT49eEpUDxQ6pIh5DLZkg1z+geHZfH2oB3t0t9Cgb5mMYBuCO30r2K10uxsrWO2trSGOKNQqqEHArelOc480lYauzz/wD4Xt4V/wCfbVv/AACaj/he3hX/AJ9tW/8AAJq9G+y2/wDzwi/74FH2W3/54Rf98CtRnnP/AAvbwr/z7at/4BNR/wAL28K/8+2rf+ATV6N9lt/+eEX/AHwKPstv/wA8Iv8AvgUAec/8L28K/wDPtq3/AIBNR/wvbwr/AM+2rf8AgE1ejfZbf/nhF/3wKPstv/zwi/74FAHntv8AHDwxc3EcMdtqgZ2CjNmwFeiRSLNCkqZ2uoYZ64NNFrbg5EEf/fAqXp0oAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArkPGPw80/wAWywzmVrO6Q4aaIcundTXX0Umk9GBS0nSbPRNNisdPhWKGIYAA6+5q7RRTAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKo6nrOn6PAZtSu4oFxkB2ALfQd6AL1FebX3xk06SV7XQLG6vbodC0RCH8azm8f/EG4407wrauw6+ZKRx+dUot7ITaR61RXkq+PPiNbc6j4UtEU/d8uUn+tXLX4xRWTrB4o0u5tJmOAYYy6j6mhxa3QKSZ6dRWXo3iTStehD6beRykjJj3DcPqK1Kk
YUUUUAFFFFABRRRQAUUUUAFFFFABUc8EV1A8NwiyRuMMrDIIqSigDiNB+F2k6F4mm1aN3mGc28D8rBnriu3oopJJbAFFFFMAooooAKKKKACiiigAooooAKKKKACis7Vte0zRIDLqV3FDgZCsw3H6CuEvPjHaXUjW3hrTrm8uVOMyxlU/OmlcD0yivJW8ffES4/5B/hS0cDr5kpH9aF8ffES3/wCQh4VtEz08uUn+tPkl2J5l3PWqK8ztPjJZ2zrb+I9OurO4J6xxlkH413ek69pmtwCTTbyKfIyVVhuX6iptYo0aKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArivE/wy0vxJrMGoGR7Vgw+0LEMCdfQ12tFJpPcCCzs7fT7SO1s4lihjXaqqOgqeiimAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFQi8tjcfZxPGZv8AnnuG78qAJqKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAoorg/Hfiq5jnTw74fbOo3PEki/8ALBT3ppX0Buw/xV4+NrdHR/DUYvtUfjI5SP6n1rmrXwRJqE4vfGV9Jqk+dywsSFiPoK29B0C10Cz8uFd9w/M0zcl27mtOuuFJR1Zzym3sRW1pbWcIitbeONB0AUVNn04+lJRWpkLk9+frTJYop4mjmhjdW6gqKdRQByt/4FtxMbzw1cyaRfdfMiJ+b2xWr4c8eXen3sejeNIxb3B+WK6/hk9ye1atU9V0mz1uya1v4hIpHynup7HNZTpqWxpGbR3asHUMpBUjII70tea+C/EN3oOrf8It4glMgJ/0G5f/AJaD+7+FelVyNNOzOhO+oUUUUhhRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAhIVSWOAOSTXBeKPH0xuzo3hGIXuoNw0o+5EPXPrUfjrxLc3d+nhjw8+bqb/j4mT/AJZL3Bp2i6JZ6BYi3skG5uZJDyWPfmtadPm1exnOfKYll4HS5uBfeK7uTVrs8gSEgR+wrqILeC1iEVvBHGi9AFFPorrSS2OdtvcXJ7cfSjJ78/WkopiGTwQXMRiuII3RuoKiuWvPAsdtObzwndyaRd53N5ZJEh9DXWUUmk9xptbGf4Y8e3Ed8ui+L4hZ33SKb+CUe59a9ABBAIOQehrgNZ0Sz16xa2vUG7rHIOCjdjmo/A/ia5sNQPhbxHIftMYzazt/y1T3PrXJUp8uq2OiE+bRnodFFFZGgUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBxfjnxNdWl9ZeG9FIGq6mCY2P8CDqam8MeAotBvBf3V/Nf3xB3SyH1rnPFagftA+EWHU2k39a9RoAKKKKACiiigAooooAKK5Dxl8S9D8D3dta6sLiS4uVLRx28e8kDvXOf8AC/vDH/Pjq3/gKalzitG0NJvZHqVFeW/8L+8Mf8+Grf8AgIa67wZ460jxzYz3OimUC3fy5UmTayn6UKUXs7g01udJRRRVCCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiijOelABRRRQBleJtdg8N+HrrVLnlYEyF7sfSvO/A+mTrbT65qZMl7qLGRXbqIz0FT/Fa7fUda0TQbVtwkn33SD+5xXQpElvEkEfCRLtX6V0UY/aMaj6C0UUV0mAUUUUAF
FFFABRRRQBheMNEfWdEL2Z8vULX95bzDquOTXT+AvEqeJvDENw3FxD+5mU9dy8E/jVYcnB6Hg1yvguX+wfitq2nyHZaXqKbZe27vXPWjpzG1N9D1miiiuY3CiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKOtFABWF4x8QxeGfDVxfynD42RD1c9K3a8p+JM51rxzovhtTugYGabHRSDxmmld2E3ZXHeCtIlstOl1PUBnUdRbzJye3pXSUp4AX+6Av5Uld6VlZHI3d3CiiimIKKKKACiiigArnPGukve6SNQsspe6efOV16sB/DXR0qhWO1xlTwQe4pNXVmNOzuaXg3xCviXwxa3/AmZMSp3VvQ1u15R8NpzoXjjWdBnYj7ZIbmBD2UZ6V6vXA1Z2OtO6uFFFFIYUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHl/iz/kv/AIQ/69Jv616hXl/iz/kv/hD/AK9Jv616hQAUUUUAFFFFABRRRQB418QgP+F6+HMgH/QZOCM9zW0VTJ/dp/3yKxfiF/yXXw5/14yfzNbR6mvzDiv/AH9f4V+p9Fli/c/MVFTzB+7Tr/dFYHwg1jS9K1vxguo39taM2ogqksgTIweRmt9Pvj614NceDv7b8Z69rV2jPZW98ItqkjLnpn2ro4R/3mp/h/UzzRfu4+p9Uf8ACZ+Gv+g9p/8A4EL/AI0f8Jn4a/6D2n/+BC/415Jpuj+CtPnhsPGPhKC2lkAMd3EWaFl9S2eK7u1+FXw8vrdZ7PRLOeJujxuxB/Wv0c8E6D/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xr5e8b/FvxHoPxIu20PWzcWCt8kaNlCK+hZPhF4DjjZ28PW+FGTgt/jXx58Rk0qLxxfQ6DD5NpG5VUz0IoA9j8OftSz+YkXiL
TUCDrLDkk16z4b+M/g7xKwW11EQOe1x8nNfDVOQ4bv8AgaAPr2W7g1f48XgtJkuIoLFHDo2VzgV2pOTmvAv2fkurTxXqH9o7sy2w2FjkkV75XZS+A5qnxBRRRWpmFFFFABRRRQAUUUUAFcP4puoNJ+KfhG7uZkghlkYSSOcAfU13FeO/tAWM2qy+HLG0QySvIflHp3rOr8DLh8R7ufGfhoHB13T/APwIX/Gj/hM/DX/Qe0//AMCF/wAa8d8G6L4Ge1trHxL4bgjlb5FvNxMbt3BOeDXo6/CHwGyhl8P2xBGQQzc/rXEdRt/8Jn4a/wCg9p//AIEL/jR/wmfhr/oPaf8A+BC/41i/8Kf8Cf8AQvW/5t/jR/wp/wACf9C9b/m3+NAG1/wmfhr/AKD2n/8AgQv+NH/CZ+Gv+g9p/wD4EL/jWL/wp/wJ/wBC9b/m3+NH/Cn/AAJ/0L1v+bf40AbX/CZ+Gv8AoPaf/wCBC/40f8Jn4a/6D2n/APgQv+NYv/Cn/An/AEL1v+bf40f8Kf8AAn/QvW/5t/jQBtf8Jn4a/wCg9p//AIEL/jR/wmfhr/oPaf8A+BC/41i/8Kf8Cf8AQvW/5t/jR/wp/wACf9C9b/m3+NAG1/wmfhr/AKD2n/8AgQv+NH/CZ+Gv+g9p/wD4EL/jWL/wp/wJ/wBC9b/m3+NH/Cn/AAJ/0L1v+bf40AbX/CZ+Gv8AoPaf/wCBC/40f8Jn4a/6D2n/APgQv+NYv/Cn/An/AEL1v+bf40f8Kf8AAn/QvW/5t/jQBtf8Jn4a/wCg9p//AIEL/jR/wmfhr/oPaf8A+BC/41i/8Kf8Cf8AQvW/5t/jR/wp/wACf9C9b/m3+NAG1/wmfhr/AKD2n/8AgQv+NR3HjPw39ml269p4Ow4P2hfT61k/8Kf8Cf8AQvW/5t/jUdx8IfAqWsrDw9bghCeGb0+tAHzbf/GbxX4f8a3zWWrG8s1lOyJmymPau+8NftSxSSKnibTRCo4LwZOa8G8W2Edv4v1C10+ErFFKQqLzgVmiwZbdZ5nVYycEZ+YfhQB9x+HPi14Q8TKDZapHEx6LOQhrkbS4i1T4u6ncW8iyx2x2h1OR+FfJwuLazn3WoaXjhmJGDXvHwCklVLh7lmd7v5lZuuK0pfGiJ/Ce2nqaSg9aK7TlCiiigAooooAKKKKACiiigDjrmWK0+PGkXDOqM9mY8E9c17DXzn8V9NvdQ8e6ZLpErxXlpb+cWTrtHpXb+Ffibd21jbL4pt28mUARXcYzj/rp6GuKp8bOqHwo9VoqG0vLe+t1uLOZJ4m+66HINTVmWFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQB5f4s/wCS/wDhD/r0m/rXqFeX+LP+S/8AhD/r0m/rXqFABRRRQAUUUUAFFFFAHlfxJ8HeKtS8c6T4h8JxWk7Wdu0Tx3D7c5NY39lfFr/oE6R/3+r22ivPxOW4PFT9pXpqT2ub069WmrQdkeJjS/i0rA/2TpHH/Tatv4c+ANYsNI8RReMY7dZdZn8zZA24IMHpXqNFVhsvwmEk5UIKLYqlepUVpu55p4Z8qG6ufAniqFLiNMmyaQf62Lnv61JdeC9c8K3JvfA94ZIAf+QbO37sD2rY8f8AhuTVNPTU9MJi1OwPmxSJ95lHJT8a0fCHiSPxNoUd1wlynyXEXeN/Q13GJk+H/iRYX85sNZjbS7+P5XWcbUZv9knrXZqwZQykEEZBHesfxB4U0jxLb+XqlqkjqP3cvRkPqDXFtaeLvALtJaStrmjr8ziU5ljHoooA9NornfDnjfR/Ekai2mMFyetrP8sg/CuioAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKbJIk
UbSSsERRksxwAK4fXPiREt0dN8K27apqDcKyDMSn3agDsb/ULPTbVp9QuI7eEdWkbAr4y+MOhWTeJrjWfDVpcnTpmJklZPk39yD6V9JWPgDUdeuhqPjq9afdydNRswrW74s8FWXiHwPc+HoI0tonj2xbR9w9qAPgOrOnWcmoalb2kIy80gUCtnxd4L1Pwfrk+nahHuaLJ3JyNvYmui+B/h99d+JunnZvitHEsg9qAPb9Q0ePwX4i8N3aLhNQgS2fj7rACvQnADnByPWo/iX4dk1rwsZLJN15YnzbdfcVl+F9Xj1rw9BPG2ZIh5Uw9HHWuqjLSxhVXU1qKKK3MQooooAKKKKACiiigBRyRXGwWq+JPjAtsQHTSYyX9twrqNT1CDSdKnvrptsUSnn37VU+FGjTra3niLUEKXmpOeD/AHAeKxrStGxrTWtzM0PTLS21/VfA+sxB7F2M9k7ddzcnB9q0LbUtV+Hl2tjrRlv9GdsQXYGXj9m9hUvxQsJLGOy8V2Kk3GlSBmRf+Wik45rsbSW08Q6DDLIiTQXUQLKeRyORXIdBbtLy3v7VLi0lWWJxlXU5BqavOLrTNU+Ht4+oaJ5l7ojnM9n1MI9UFdroevWHiCwW60+UMCPmQ/eQ+hFAGlRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABTZEEkTIejAg06q99f22m2b3V9MkMKDJZzgUAfIvxv8Ban4Q8TyatYoxsLsk+cozg+9ePs7OxZjknrX2drkF58XbebSYYTaaBn57lxh3YdNvtXzJ4++GmseCvEE9m9tLPagkxTqpIK0AcbEnmTIg/iYD9a+nfC2m/8ACI6r4VgYYgvrUszdlPpXzbo0Rn12xiAyXuEXH/AhX2f4/wDC883gOym0xB9r0xUcEf3APmqouzuJq6sbB60lUND1aDXNEtr+2PyuuCO4I4NX67zkCiiigQUUUUAFFFFABSqu5gB3pKo63qkei6Jc30jBWRD5YP8AE3YUDMDw/F/wkPxnk1FQHt9OtmtpF7biKuXtpD4M8VS2moQrP4c1pvnDjIjlPQD2rU+FGhSafoE+q3SlLnV5PtEit1XrxXUeI9Ct/EWiT2FyoJdSY3PVG7EVwSd3c64qyscZdeFtb8HznUPBM32myPLadK3yKvqtdB4Z8dab4gJt33Wd+h2vbT/KxPt6is74f67cKZ/DWtMRqGnnYjP1mQfxVqeJvBGneIT9oXNnqKD93eRcOtSM6WivNrXxPrvgmdLLxhC11ZE7Yb6Ibjj1f0r0Cx1C01O1W5sLhJ4m6OhyKALNFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHl/iz/kv/hD/AK9Jv616hXl/iz/kv/hD/r0m/rXqFABRRRQAUUUUAFFFFABRRRQAUUUUAFea63DJ4A8YJrtoh/sjUH2XkafwyHo30r0qqeq6Zb6xpk9jdrmOZCpOORnuKALMM0dxCksLh43GVZTwRT68+8DancaFq83g/WGO+ElrGQ/8tIueteg0Acp4j+H+l65Kby23adqX8N5Bw1c/H4k8S+BnW38VWr6hpoO2O7txuk+rV6XTZI0ljaOVQ6MMFSOCKAKGka9puu2wm0y7jnGMsqtkp7Edq0a4LWPhtHFcNqHhG6fSbsfMYojhJj/tVDp/xAvtGvV0zxzYm1nJ+W6iGYiPUmgD0OiobS8tr+2W4sp0nhbo6HINTUAFFFFABRRRQAUUUUAFFFFABRRRQAUUVk674m0vw7bGXUrlEfGViB+d/YCgDWrlvEfj7SfD58kM17eE4FtbfMwPuO1c42p+KvH7NFpEb6NozHBuX+Wb8BXUeG/A2k+HSJ0j+1X5GHvJuXegDmItD8V+OXW48Q3TaRp3VLa2PzSr6NXc
aJ4d0vw7a/Z9JtEt1PLFRyx9Sa06KACsTxb4jh8M6FLePh5j8sEXeRvQVsyypDE0krBEUZZj0ArzbS45PiD4ybV7hT/Y2mSbbVG/jkHU/SgC54X8CWt9ok934rtkvb3Usu5lGSiMOF/CovAXwi03wB4n1DU9MmZ47tNqo3/LPnoK9EooACAQQehryTUtPk+HniZ7mJCdD1B8tjpFIepPpXrdVdS0621bTprK+iEsEy7WU1UZOLuhNXVjmEdJYlliYPG4yrDoRS1yd1a6r8Obry5RLqOhSN+7kAy8PsfYV0enanY6vAJtMuY7hcZIQ5K+xrsjNSWhyyi4lmiiirJCiiigApQM59ByTUN5d22n25nv50t4h/FIcCuXF3qvjq8On+Hle10wHE96wwWH+z61MpKK1KUW9hJopfiB4kXSbPJ0ezcNdzD7shB6A161bwR2trFbwqFjiQIoHYAVR0HQrLw7pUdhp0YSNOWPdz3JrSrilJyd2dMVZWIL20ivrKW2nQOkilSDXAfDa7l0bV9U8H3jlmsX82KRv41Y9B9K9Grzn4kWz6Hqum+LrMFRZyBblV/5aKTgZqSj0YgMCCAQeoNcJrnhC80i/bXPBz+TcL80tnnEcw7/AI12llcpe2MFzGQVljVxj3GanoA5zwt4xs/EcJiYG2v4+JbaXhge+B6V0dcp4o8Fx6vKNR0qU2Oqx8rPHxv9jVfw340ka8OieJ4/sWqRfKGbhJvdT60AdnRRRQAUUUUAFFFFABRRRQAUUVy/ijxpb6Hizsk+2anJ8sdvHyVPq3oKANLxB4k0/wAOWRuL+UbiP3cK/fkPoBXIWWjap49vU1LxKGttJQ5t7EceaPVxV3w/4LuLm+GueMJPteoMd0cBOY7f2FdwBgYHSgCO3t4bS3SC2jWOJBhVUcAV578UbganPp3he1RWvL9w+7GSqA8ivQridLa2lnlOEiQux9gM1514EgfxN4q1HxZeDfDvMenk/wAKdDQBl6h+z74fk1yx1XSibSW2ZWaJfuuR3r1kwq9t5MgDIU2sD3GMVJRQB5HcWknw98TypICdE1F8o3aBs9K6wEFQynKsMgjvXQ6zo9nruly2OoRiSKQd+qnsR715jL/a3w9uPs+qiS/0UtiO5UZdB/tV0U6ltGYzh1R1tFQWN/aapbifTrhLiM90OcVPXSYhRRRQIKKKgvr600y3M+o3EdvGBkFzjP0oGT8BSzEKqjLMegFclHZy/EXxMlvEGXQ9NkDyS/8APSQdAPUUsLar8Qbo2mlJJZaKjYnuGGGk9l9RXp2j6PaaHpkVjYRhIoxj3Pua5qlS+iNoQ6suoixoqIAqqMADtS0UVzmxwvxB0O5jaHxPoo26hp4zJt6vEOorpvDuu23iLQ7fUbU4Eq5ZD1Q+hrTdFkQo4DKwwQe9eaoz/DvxuyOT/YWrPkMf+WUp6Ae1AHo11awXts9vdxLLFIMMjDIIrgL/AMFar4Zu21PwPcsFBy2myH90R7e9eiAggEHIPeigDkfDfxAsNXk+xaip07UUO1oZ/l3t/s56111YHiTwbpfiWPddReVdqP3V1Hw8Z9RXJx674h+H8qW3iOKTU9JztivIhukH+9QB6XRVLStYsdas1udNuEnjI52nO0+hq7QAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHl/iz/AJL/AOEP+vSb+teoV5f4s/5L/wCEP+vSb+teoUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAch4/8Nyapp8epaZmPU7A+bHIn3nUclPxrR8IeJI/E2hR3XCXKfJcRd439DW9XmutQyeAfGCa7aIf7I1B9l3Gn8Mh6N9KAPSqKZDNHcQpNA4eNxlWU8EU+gAqrqOmWerWbWuo26XEL9Ucc
VaooA84u/BOt+GLo33ge8LRg86dO37sD2rS0D4kWV7N9h1yJ9Lv0+VhONqO3+ya7WsfX/C2k+JINmp2qSOo+SXHzIfUGgDXVgyhlIIIyCO9LXmbWPizwC7SafM2taODudJTmVB6KK6nw7440jxGipBKbe6PW1n+WQfhQB0dFFFABRRRQAUUU2SRIYmklYIijLMTgAUAOqtf6lZ6XbG41C5jt4h1eRsCuN1r4jxm6OmeFLZtTvm4WRBmJT7mq9h4A1DW7pdQ8c3rXDk5Ono2YVoAju/HOteJrlrHwNYEqCRLd3I2pj1U960NC+G1pbXAvvEVxJrF9ncGuOREfRa7G0s7ewtUtrOFIYUGFRBgCpqAEVQqgKAAOgFLRRQAUUVheLvEkXhnQpLs4e4b5beLvI/oKAOc8c6rc6zqkPhDRXPnTkG9kX/lnFx0967PSNKttF0uGxs1CxxKFz3Y+prm/AHhuTTbGTVdUzJqeoHzZHf7yKeQn4V2NABRRRQAUUUUAMmhjuIWinQSRuMMrDIIrz3W/hXEs7X3hK7fS7gfN9njOI5D716LRTTsB47LqHjzw8ude0qC6gHRrT5mIqGP4qWRfZN4d1iMjgs0JxXtFMkijlQpKiup6hhmtFVkjN04njcvxU0+NsJoGrzD+9HCSKkh17xl4gGPDOirAD/FfDbgV6/FbwwJthiRB6KMVJQ6smCpxPM9J+GF3qNwt741v3umPJsUbMQr0WysrbTrRLWxhSCGMYVEGAKnorNtvctKwUUUUhhVPV9Og1bSbiyuYxJHKhGD69quUUAef/C7UZreG98M6k5a902Qn5v7hPFegV5r42ifwv420vxRbDy7SR/L1Fh3XoK9HhlS4gjmiOUkUMp9QRmgB9YviTwvY+JbMR3S7J4+YbhfvRn2raooA890zxHqPhDUI9G8W7pLVztttQHIx/tntXoEciTRLJEwdGGVYHIIqtqel2esWElnqEKzQuMFWHT3rgc6v8N7n5zLqWgM3J6yQ/wD1hQB6TRVXTdStNWsY7uwmWaGQZDKc49qtUAFFFFABQSFBJOAOSahu7u3sbV7i8lWGFBlnc4Arz671fVviBePp3h8yWOkI2Li9PDP7J6igC7r3jK71G/Oh+Doxc3bfLJc/8s4h359a1PC/gy10AG6uHa81KXmW6l5bJ7CtPQvD9h4esBa6fEFH8b/xOfU1p0AFFFNkkWKJ5HOFRSxPoBQBw3xP1i4i0230PS2/4mGpOFVR3jzhq6nw/o9voOhW2nWi7Y4U6e561w3hWNvF3xBv/EV0C1rp7GLT27EHrXpdABRRRQAVFc20N5bvBdRrLE4wyMMgipaKAPN9Z+FrW1y194OvX0+Uci0BxExrFm1jxv4eX/io9HjuIh0ayG4kV7FRVxnKOxLinueLxfFSyeTZN4f1eE93eEhaJPipYq+yLw9q8pPRkhJGa9llginjKTRq6nswzRHDFCm2KNUX0UYq/bSJ9nE8gi1jxx4gXHhzR4rdD1a9G0gVs6P8LGurhb3xhfSahJ1+yE5iU16TRUSnKW5SilsRW1rBZ2yW9rEsUSDCoowAKlooqCgooooAKyfEug2/iPQ57C4UbmXMb90fsRWtRQBw/wAP9enIn8N6yxGo6cdil/vTIP4q7iuF+IGh3MMsPinRBtv7D/W7eskQ6iun8P65beIdFg1C0PyyrlkPVD6GgDTpssSTRNHKgdGGGVhkEU6igDz/AFXwJe6PetqvgW4+yz53NYscRSH1NXPDnxAhvJ/7N8QxHTNTT5WWT5Ukb/ZPeu0rF8ReFNM8TWvl38I85R+6nX78Z9RQBtdelFeaLeeJfh5KE1ESaxomcCcfNMn1HpXc6Lr+na/aCfTLlJhjLKD8yexHagDSooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigA
ooooAKKKKACiiigAooooA8v8Wf8AJf8Awh/16Tf1r1CvL/Fn/Jf/AAh/16Tf1r1CgAooooAKKKKACiiigAooooAKKKKACiiigAqnq2mW+saXPY3a5imQqfUZ7irlFAHn3gbU7jQtXm8H6w3zwktYyH/lpF7+9eg1yHj/AMNyapp8ep6ZmPU7A+bG6fecDnZ+NaPhDxJH4m0KO54S5T5LiLvG/oaAN6iiigAooooAK5XxF8P9K12Q3UG7T9Q6i7t+HrqqKAPNIvEPibwM6W/ia1fUtNB2x3UA3SfVq7rR9f03XrYTaZdRzgDLKrcp7EVfkjSWNo5VDIwwVI4IrhdZ+G0QuDqHhS6k0m7U7vKhOI5T/tUAd5RXlkvxabwbMdO+IcAt7sLuSWAZRx/jT4Na8RfEuLf4fcadoMhwbrOJj9KAOm8R/EDSdAPkRlr69JwLa2+ZwfcVz0WgeKfHDrceI7ptK0/70dtbH5pF9GrqPDngfSfDmJoo/tN7/Hdzcu1dHQBm6L4e0zw/a+RpVpHAp+8VHLH1NaVFFABRRRQAUUUUAMmmjghaWZwkaDLMxwAK840eKT4geMn1m6Rv7H059tpG/wDFIP4vpVnxzqdxrurQ+D9Gb55sNfSD+CL2967XSdMt9H0uCxtFxFCgUepx3NAFyiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAMvxJo0Gv+HrrT7lN6SISB/tDkfrXNfDDWZ7nRZdH1R/+JlpzlJEPUJnC/pXc15p4hRvB/wASrTXYgVstUPlXr9lx0oA9LopqOskauhyrAEH1FOoAKbLEk0TRyqHRxhlI4Ip1FAHnmpeHdT8HX0mreEyZbJjun08/dUdyorqvDfiew8TWPnWbFJV4lgfh4z7itmuL8SeDJ/t/9t+FZfsmppy8Y4Sf/eoA7Ss3XNfsPD9i1zqEwQY+VM/M59AK4tfizbQwPp93aSjXk+T7GBy7eo9qt6H4NvNUvhrfjNxPdMd0VmDmOEdvxoAp2umar8Q7xb/Ww9loiHMFmOGmHq4r0G0s7ewtUtrOJYoYxhUUcCpgAoAUYA4AFFABRRRQAVxnxM16bSvDn2TT/mvr5xCiDqVPDGuyJCqSxwByTXmmlg+NPindahLk2WhnyoP7shPegDs/Cegw+HPDVrp0HOxdzE9Sx5NbNFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAI6LIhRwGVhgg9681iZ/h545aJiRoerPuDN/yzlPRR7V6XWR4n0CDxHoU9jOo3sMxOeqP2IoA1wQRkcg0VxPw/1+eRJvDusMV1HTj5YL9ZkH8VdtQAUUUUAI6LIhR1DKwwQe9cJrPw9a2um1TwbOdOvQdxgU4jmP8AtV3lFAHDaH8QgLsaX4sg/s3UF4MjDETn/ZNdwrK6BkIKkZBHeszXPDmmeIrTyNUtklwPkcj5kPqDXDlPE3w7kLRmTWdDzk55mj9gPSgD0yisjQPE2meJLQTadOGfGXhJ+eP2IrXoAKKKKACiiigAooooAKKKKACiiuU8VfEbw/4SiP265E04ODbwHc/5UCbS1Z1dFeGar8ddWvn/AOKT0hdnrefLWU/xU+IE7b5LeyhPTah4qHUgupxVMwwlN2lUVz6Ior53T4rfEG2yY7Wxnz1Eh6Vt6T8ebizTb4s0l1k9bRdwoVSL2Y6ePwtV2hUTZ7bRXP8Ahrxroniq2STTLtDKwyYGOHX6iugqzt3CiiigAooooAKKKKACiiuY8Xp4sKwP4RNsXU5kW4OARQB09Fcn4S8Xy6tcTaVrUItdXtR+9Qfdb3WusoAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAPL/Fn/Jf/CH/AF6Tf1r1CvL/ABZ/yX/wh/16Tf1r1CgAooooAKKKKACiiigAooooAKKKKACiiigAooooAK811qGTwB4wTXLRCdJ1B9l3En8Mh/jP
tXpVU9W0y31jS57G8XdFMhU+oz3FAFmGaO4hSaBw8bjKsp4Ip9efeBtTudD1abwfrLfPDlrFz/HF7+9eg0AFFFFABRTZZUhiaSVwiKMszHAArhdZ+I6SXR03wlbNqd63CzIMwof9o0AdlqOp2Wk2pudSuY7aEfxyHArgrrxvrfii5ax8EWBVASJLy5GEI9VNSad8Pr7WbtdS8dXrXUpOTYI2YFrvbS0t7G1S3s4UhhQYVEGAKAOM0r4Y6ftabxQ51u5k5P2oblQ+gqne+Cda8N3Z1DwPenYOunTHEQHtXotFAHE6D8SLK7m+w6/E+lXyfKRcDakjf7JrtVYMoZSCCMgjvWTr3hfSfEcGzVLRJXUfu5MfMh9Qa4l9P8WeAWaXTJ31rSAd0kcxzKo9FoA9NornPDnjnSPEQWOKU214etpP8sg/CujoAKKKKACsHxf4kj8M6FJdcNcv8lvF3kf0FbU88VtA807hI0G5mboBXnWjQSePvGDa7dof7I099lpE/wDFIP4xQBs+APDcml6fJqep5k1O/PmyO/3kB52fhXYUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFYHjXw+niXwpd6exwzDehHUEcit+igDkfhx4gfWvDKQ3nyXtmTDLGeoC8A111eZXGfBXxYW5/5c9fwj/3YmFemgggEHIPSgAooqO4uIbS3ee5kWKJBlnY4AFAEnTrXE+IvGlxNff2J4RiF5qMnytN1jh9cn1qhf67qvjm9fS/C+6201Di4vzxuH+wa67w94asPDdl5FkmZG/1szfekPqaAOSX4VQSWZvLi9lbXj84v/4lb0HtVrQvGN3pt+ND8Yx/Z7pflju/+Wco7c+td1WZrugWHiHT2tdQiDD+Bx95D6igDTBDAEHIPINFec2er6r4AvU03xAZL3SXOLe9HLRj0evQba6gvLZLi1lWWJxlXU5BoAlooo6UAcp8RvER8PeEppIRvuLgiCNB1+bjI+lWfA3h4eHPC1taMd87DfLIerE881yuT41+LDKebHQPldD92Vj0NemAAAAcAUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHCeP9Fuba4h8VaIMX1jjzgOskXcV1Oga3beINFt9QtD8sqglT1Q+hrQdFljZJFDKwwQe4rzaBn+HfjdoHJ/sPVX3Kzf8spT0Ue1AHpdFAIIBByDRQAUUUUAFBAIweRRRQBxHiD4fJLcnU/DE7aZqKncREcJMf9qodH8fXNhfDSfG1v8AYbscC6A/cv8Aj613tZ+saHp+vWRtdUtknj6jcOVPqKAL0ciTRLJEwdGGVYHIIp1eaSaX4j+H0jXGkSyato4O6WCU5kjHotdd4d8X6V4lhBspglxjL20nEifUUAbtFFFABRRRQAVHcXEVrbvPcSLHFGMs7HAAqSvC/ij42k8Q6i3h7RZyLOE/6VKh4f1WplJRV2Y168MPTdSb0RL44+Kl3rMsuk+EHMUA+WW96H/gNcBDp0STG4uXa7uj96eU5Y1Yhhjt4VigUKi9AKfXDOo5n5/js0rYuVr2j2/zFyaSiisjyQozxggEfSiigCmbAw3H2rSriTT7vr5sJwTXqXgP4sN5kWjeLiIrjhY7r+F/qfWvOahurWK9gMUwyOoPcGtoVXH0PZwGbVsLJRk7x7f5H1QjrIiuhDKwyCO4pa8i+E3juQyjwxrs3+kJxbSuf9YPSvXa7k01dH39KrCrBVIO6YUUUUzQKKKKACiiigDzHxq39n/GDwhJb/I19K0cpH8QHrXp1eX/ABD/AOSteAv+vl69QoAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAPL/Fn/Jf/CH/AF6Tf1r1CvL/ABZ/yX/wh/16Tf1r1CgAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAOP8f8AhuTVNPj1LTMx6nYH
zYnT7zgc7PxrS8IeJI/E2hR3PC3MfyXMXeN/St6vJvFurWXwq8WDXTMP7P1A4uLOM/OXP8QFAHrNcp4j+IOlaC5t4d2oX2dv2a2+ZgfeucGo+KPiIAdJB0jQpQMztxMw9RXWeHPBGk+G1WSCL7RefxXc3MjfU0ActF4d8UeN5FuPE122maf96K2tThnX0eu60bQNM0C18jSrSO3U/e2jlj6mtGigAooooAKKKKACiiigDlvEfgDStedrmIGw1A8i8t+HrnI9f8T+BZEt/Edq+qaYPljubcbpAPVq9MpskaSxskihkYYIPQigDP0bxBpmv23naXdxzgD5lU8p7EVpVwmtfDWA3H2/wrcvpF2p3eXCcRyt/tVg6r8U9U8FafLa+M7HZfEbbe4iH7pz2yaANjxvqVxr+sweENHY5lw19IP4Y/T6122l6bb6RpkFjZrtihQKPf3rlvhvo8MGjHWHuFu7zUT5skwOdoP8I+ldpQAUUUUAFFFFABRRRQAUUUUAFFNkkSKNpJWCIoyzE8AVwWv/ABTsrWc2Ph2B9Vu2+UPCN0aN/tGi1wO/pGZUUs5CgdSa8gl/4WF4iTbqlzBptuehtThsVXT4a3JffceMtXlB+9Gz8VqqUmQ6kUezJIkq7o2DL6g5p1eMSfDSUnNv4v1a3H91HOKlhsPHnh0E6HqSamo4xetnIodKSEqkT2KivN9F+KRhuV0/xhZPp9x0a5A/cn8a9DtrqC9t0ntJVmicZV0OQazatuaXuS0UUUgCiiigAooooA5j4geHj4h8KzwwER3MP72KQdQV5wKd4B8QjxJ4Tt7lhslizDIp65XjNdIQGUhhkEYIrx/Udfi+F/xAuYPLe5h1rH2W0h5KP649KAPUtY1qx0Kwa71KdYo1HGTyx9BXDQ2ur/Ea6FzfiTT9BRv3UHR5v972q1pHhC+16/XWvGjiVid0NiD+7Qdsj1rvVVUQKgAVRgAdhQBBYWFrplklpYQrDBGMKijirFFFABRRRQBBe2VvqFo9teRLLDIMMjDg15/cWOq/Dq7a80vzb/Q5GzNbdXh/3favR6RlDqVYAqRgg96AKGja3Y69YLd6dMsiEfMAeVPoazfHPiBPDfhS5vD99/3MY77m4FYmseEb7QtQbW/Br+W4O6axP+rkHfA9a5uDX0+J3jmxsPKeC10wFr20m4JcdDigDtvh34fbQ/DERu/mvbn97PIerZ5Ga6ukVQihVGABgCloAKKKKACiiigAoopk00dvC807rHGgyzMcACgB9Feea78U4FuW0/wvavqV0eFnQZiU+5rBmt/H/iJcazfRaZEfu/Y2wwFXGEpbEuSW56+7pGhaRgqjuTSo6yKGRgwPcGvGI/hrcb91z4w1acHrGz8UP8NJ92638Y6tCByEV+Kr2MifaRPaKK8eit/iB4eUnRr6HUox94XjZOK3dD+KkDXK2Hii1k0y56GZxiJj7GocJR3KUk9j0SimQTxXMCTQSLJG4yrKcgin1JQUUUUAFFFFABRRRQAVkeJ9Ag8R6FPYzKN7DMTnrG/YiteigDifh9r80sEvh/V2K6jpx8v5/vSoP4q7auD8faLc2d1D4r0UYvLL/j4Uf8tIu4rq9C1q21/R4NQtD8kqgle6n0NAGjRRRQAUUUUAFFFFABXHeIvh9Z6lOdQ0eRtM1NTu86DjzD6N7V2NFAHnmm+OtQ0K+Gk+OLUwSAgJfRj90R7n1rstQ1yx0/QJtYlnRrOKMyeYp4I9qn1LTLLV7NrXUrdLiFuqOM18nfGvWz4d1CTwp4f1aaawYbpoi2RGT/AKAPY/hb8abbx1rN9pd4qQXEbk2/YSJnj8a9Yr869E1m88P6xb6lp0hjngcMpB6+1fcfw08eWfjzwpBfQuBdIoWePPIYdTQBF8U/Fo8K+E38liLy9Jht8dmPevCNOtTa2p38zTHzJT6setdZ8YtTfVfiNbaNndBZxCfPYNXPMcsTXHXld8p8ZxDiXKpGgtlqxKKKK5
j5cKKKKACiiigAooooAp36zxGHULA7b2zYPCw7etfR3gfxLB4r8KWuowNkldkmf74HNfPvUEeoxXbfAjVWttS1Xw43EcH75Pck811UJfZPreHsS7yw79V+p7bRRRXWfXBRRRQAUUUUAeX/EP/krXgL/r5evUK8v+If8AyVrwF/18vXqFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQB5f4s/5L/4Q/69Jv616hXl/iz/AJL/AOEP+vSb+teoUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFNlljgiaWZwiIMszHAAoAdVTUtUsdItDc6lcx20I/jkOBXG6x8RxNdnTPCFq2p3jcCdBmJD7motO+H15q92upeOLxruYnJskP7kfhQBDdeNtd8U3DWXgexMaAkPe3S4Qr6qauaf8LNOe0uD4jmk1a6uUw0k5z5RP9yu3tbSCytkt7SJYYUGFRBgCpaAPP/BGq3OiavN4P1lvngy1i5/ji9/evQK4/wAf+G5NU0+PU9MzHqdgfNidPvOB/B9K0vCHiSPxLoSXPC3MfyXMXeN+4oA3qKKKACiiigAooooAKKKKACiiigCO4uIrW3ee4cRxRjczN0ArzbS7AfErxBPqmtW4k0S1YxW1vKOHYfx1a8a6jceI9bg8IaO338PfyDoIvT613Omadb6TpsFjaLtigQIv4UAcJfeB9Y8O3jaj4HvSM9bCdv3QHsKvaD8SLS5mFh4ihfSb5flPnjakjf7Jrt6ydd8M6V4ig8vU7VJXAwkmPmQ+oNAGqrK6hkIKkZBHelrzN9M8V+AWaXSJ31jSAd0kUx3SqPRa6jw5450nxEFijkNrenraT8SD8KAOkooooAKKKKACq99fW+m2Mt3eyrFDEu5mY9KsE4GTXk+talL8QPEr6dayFdEsX/fMvSZx1U04pydkJuyuNvdT1X4iXRW2aXTtBjbG4cPN/wDWrd0rRdO0ODytLtUhB+8wHLn1NW4oo4IUhgUJFGNqqOwp1dsYKK0OaUnIM0UUVZAUUUUAV9Q0+z1W1NvqVulxF/dcdK5qJdX+H919r0lpL7Rif31qeWiH+yK62l7EEZBGCPUVMoqS1KjJx2Og0bWbPXtLiv8ATpRJFIO3VT3B96v15ELiX4e+JVvbckaHfPi4i/hhJPUV61BMlxbxzRNuSRQykdwa4pRcXZnUndXH0UUVIwopksscETSzOERBlmY4AFef6l4j1PxjfyaR4QzHZodtzqB4BHoh9aANHxL40kjvBovhiMXuqyfKWXlIPdq5rXfh1dw+HZdZkuXu/EEWJVlY5EeDkhfwrvPDXhax8M2hS1UyXEnM1w/LyH3NbMiLJGyOMqwII9jQBjeENei8R+GLW/hOSV2P/vDg1t15p4XdvCHxDvvD0x8vT70+ZYIe7dWr0ugAooooAKKKKACiiigDH8V65F4e8N3V/M23apVD/tkcfrXnug/D69u9CTxAtw9p4iuMyu4OA/OQD+FX/F0jeLfHun+Goj5mn2582/A/hYcrXpMcaxRJGgwqKFA9hQByHhnxo1zdnRvEcX2LVYvl+bhJvda7GsLxN4UsvEtqFnzDcx8w3MfDofrXOaT4n1DwvqEei+MAfKY7be/H3COwY+tAHoFFIjrIiujBlYZBHcUuRnGaACiikd1jRnchVUZJPYUAVNV1S00bTZb2/lEcMYyST1PoPevL7m61f4izl52k03QQ3yIOJJR7+1O1C9k+IXieSEMV0TTnwfSZgetdUqqiKiKFRRgAV0U6d9WYznbRFTTNKsNGthBplslunfaOtW6KK6TEKKKKBBVXUtLsdYtzBqdslwnbcPu+9WqKBnKWs+r/AA8ufNt3k1DQnb95G3Lw/T2r1DS9UtdY06K9sZRJDIMgjtXNEK6Mkih0cYZT0Irk7O/l+HXiZAWZtB1GTbt/54yHoB7VzVKdtUbQnfRnrlFIrBlDKcgjIIpa5zYKKKKA
CiiigAooooAbJGksbJIoZGGCD0Irza1d/h540a0kY/2Jqj7o2b/lnKf4R7V6XWP4n8PweJNDmsZgBIRmGQ9Y37EUAbAORkciiuK+H+vzzRTaBrBKalp58v5/vSoP4q7WgAooooAKKKrXeo2dgu68uY4R/ttigCzRXNXXxD8KWblJdctPMH/LNXyxrCvPi/pkGfsWl39/jp9njzmgDu72TyrGaQ5+VCeOtfGvid/h5qOu37yzammoPOwdm+5nNfQn/CwfEWtoU0bw3c2wkBA+1pjH1r5A8VxXMPizUlvkCXHnsXVegOaANabwbbTu0lhrFoIcZUSvhq3fhp4q1H4ceMopYn+12Ex2XCxHK49R71w+ieHtU8RXottItJLh8/MUXIUeprqrzSrbwQUtTeLe6w5H7uE5jTPY+9AHoWq6rFr3jq71S3JMcqfLnqB6U6sjTLSSw1ARzDEk0AlYehNa9efV+Nn5znF/rs7hRRRWR5QUUUUAFFFFABRRRQADrWx8MbgWvxVIJx9oQL9axx1rV+G9sbr4qow5+zqGNbUfjPbyK/11W7M+jqKKK7z9ACiiigAooooA8v8AiH/yVrwF/wBfL16hXl/xD/5K14C/6+Xr1CgAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA8v8Wf8AJf8Awh/16Tf1r1CvL/Fn/Jf/AAh/16Tf1r1CgAooooAKKKKACiiigAorzH4jeOfEujeMNL8PeFIbRri8haZnuegArD/4Sb4tf3NF/KuHEZjhMNPkrVFF9mbQoVaivCNz2qivFh4m+LZIATRefauo+FXjTWPFkWr2/iKGCO80258ljB91qeHx+FxUnGhUUmuwp0alNXmrHoNFY3iDxXpXhuAtqNyqykZjgB+eT2ArjDd+LPH7FbKNtF0Zzhmk+Wcj1FdpkdB4j+IWl6G5trYNqN/nb9ltvmYH3rAi8N+JvG0i3Hii7OnWH3ora2OGYej11XhzwVpPhtFe3i8+7/iu5eZG+proaAM/R9C03QbXyNKtI7dD97YOWPqa0KKKACiiigArzXWoZPAPjGPXLRCdJ1B9l1En8Mh/jNelVT1bS7fWdLnsLxd0UyFT6jPcUAWYZo7iFJoHDxuMqy9CKfXn3gbVLnQ9Wn8H6y3zw5ayc/xxe/vXoNABRRRQAUUUUAFFFFABXP8AjLxKnhnQ2nTDXcx8u2jP8b9hW5c3EVpbSXFzII4o1LOzdAK868O28vjrxZJ4j1CM/wBm2bGOzhfozD/loKANzwD4afR9Me+1DL6lft50zP1TP8I9q62iigAooooAK5fxH4B0nXy06q1jfE5F3b/K9dRRQB5nHrninwLIlv4gtm1XSx8sdxbjMij1eupbx1oR8OXOsQ3sckNtGXkUNyp9DXQyRpLGySKGRhgg9CK+RPjz4i0iLxBJo/hRvs8a8XawHCO3cGgD1j4YfHa28Ya7c6Tq4jtpjIfsrdA654H1r2Wvzhtbqeyuo7m0laKaJtyOpwQa+vfgn8YIfGOmppGsyLHq1uoUEn/XDtj3oA7P4keIpNB8Lutm2L27PlW/+9WL4U0aPRPD0MKjEs/76Y/7Z61m+PZv7e+JelaPyF00i6Yf3s11jkFzgYHYV1UY6XMKr6DaKKK3MQooooAKKKKACiiigCpq2mwaxpFxYXS7o5Fzj3HSofhVrk89jdaBqLFr3TWwf9zotaQ4IrkLGb/hH/jNG6cDWwIyPpWNaN1c1pvWx69VPVNVstGsXu9RnSGJB1Y9T6Cs7xN4rsfDNoHuCZbmTiG2j5eQ+wrm9L8L6j4qvo9Z8YkiIENBYD7oHbcPWuQ6Cuq6z8R7rdIJNN0BW4H3ZJv/AKxrvtM0yz0ixSz0+BYYUHCqMfjVmONIo1jjUKijCqOgFOoAKKKKAOE+KOkzPpEGu6YhbUtMcPER2Un5q6nw/q0Ot6Da31u4dZIxuI/vY5/Wr08K3FvJDIMrIpUj2Irz
rwFM3hvxZqnhO4Oy2RvMsi38eeTigD0miiigAooooAKz9d1SLR9Eur6ZwgijJUnu2OB+daFebfEG4fxD4n0rwhbkmC4bzrp0/g2nIBoAufC7TJW0658RaihXUNVctID2UHiu9qO2gS1to4IlCpGoUAe1SUAFUtW0iz1vT3s9RhWWJx3H3T6j3q7SMwRGY8ADJoA8r1HXbr4QRNJrE732gsT5JJzKp7CvFLn4+6zefE221mORotMik2C1zwUJ6n3rY+NNtr3i3XJvsms215awnEenwP8AOPqPWvENQ0m/0mURalaSWznoJBigD9CdG1a11zR7fUbCQSQXCBlYGuV+KfiGfSPDX2TTyDeXziFR3CngmvHP2b/iT9muj4T1WY+XJzasx4XHavQPFD/258YtOgJ/0fT4iHTsxPeqiruwm7K5teH9Hh0HQrexh52ruZj1JPNaNKetJXecgUUUUCCiiigAooooAKztf0mPW9CuLJ13OVJhP91+xrRpVbawYdqBlX4V6/Jqnh6XTrty93pT/Z5Wbqx55rua8j8NyHw/8aJNJi4h1S3a5cjpur1yuCSs7HWndXCiikZlRSzEADuakYtFY974u0DTiwvdWtodvXc/SsO7+KvhqIH7Fd/bz2FvzmgDtKK8zf4uzXEpg0zwrqsjnpK0fyUh1v4lauN2jadZ2i/9PYxQB6bUc9xFbRmSeRY0HUscV5t/wi3j7Vhu1rWIbdm+8LU4A+lPg+DkPm+ZfeJNVuieWR5floAo/ELW9D0rULfxRpus2sd9akLNHv5lj7ge9ayfGPQrjTYLuwguLzzV3FIVyV9jWpa/DHwtBzcaZFdnsZxuxXL6fplj8N/HD2ps410fVXzFKV/1cp/hHtQBYHxV1PUcrpPhXUY8cbriPANKbv4rajhrODTLWBv+ev3gK9NGMDHTtiigDzL/AIQLxVq/GteIZbYHr9kfFWbP4P2EJzfa1qWoe1xJmvRKKAOWtfht4TtgrHRbaaUf8tZFyxretNKsLAAWdpFDjpsXGKt0UAFfOmtfAV9c8e6hruv30dlohcu7Zw1fRdch8R/Ax8d+Hjp66hPZMMkeW2A/s3tQB85+Nfibo/h2yk8NfDS1S2iQeXNqAHzyfQ1594CspNe+I2lW85aU3F0PMZuc+pNXfGHwv1zwnr0umFPt7xp5jNbjdtX1Ndd+zbog1P4iyTSpxZxeZkjoc0Adt8TdJ/4R/wCJEEsSbbKa1WNP96scjBxXr/xk8Kvr/hVb61BNzpjeeir1fHavGbO6+2WiTYw+PnX+6fSuKvG0rnxPEGHcayrLZ6fMnooornPmgooooAKKKKACiiigAHr6c12HwN06S78VavrbL+4ZBHGfcda4TUbl7e2CwIZLiY7I0HVs8V9AfDPwqPCXgu2smJaWX99IT1y3OK6sPHXmPquHsO3OVd7bL9TrqKKK6z7EKKKKACiiigDy/wCIf/JWvAX/AF8vXqFeX/EP/krXgL/r5evUKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDy/xZ/wAl/wDCH/XpN/WvUK8v8Wf8l/8ACH/XpN/WvUKACiiigAooooAKKKKAPGviD/yXXw5/14yfzNbR61i/EEZ+O3hwD/nxk/ma3Chz2/OvzDiv/f1/hX6n0WWfwfmCffH1rx3TPiHrXhPxR4n0bw3pcl/eXl5v+QZKjHP869jRDvHTr61x/wAOPDaa5qPjKWCQ22ow6gPIuE6g4JAPtmujhD/ean+H9TPNP4cfUr6H4hs7CX7brXgvxBqeoMdxa4j3CM/7NdePjNIAAPBOugDoBF/9aug8NeLbn7cdD8UKLfVI+FkxhJx6iuxr9HPBPL/+Fzyf9CTr3/fr/wCtR/wueT/oSde/79f/AFq9QooA8v8A+Fzyf9CTr3/fr/61H/C55P8AoSde/wC/X/1q9QooA8v/AOFzyf8AQk69/wB+v/rUf8Lnk/6EnXv+
/X/1q9QooA8v/wCFzyf9CTr3/fr/AOtR/wALnk/6EnXv+/X/ANavUKKAPCPGHjyfX0trrTvBeuQanaSB4pvKxkD+En0rY0j48jUbP5fCerz3EPyXIhjyEfuK9frzfWlm8BeLk1m0UnR9RfZdRIPuOf4zQBX/AOFzyf8AQk69/wB+v/rUf8Lnk/6EnXv+/X/1q9NhmjuIEmgcPG4yrL0Ip9AHl/8AwueT/oSde/79f/Wo/wCFzyf9CTr3/fr/AOtXqFFAHl//AAueT/oSde/79f8A1qP+Fzyf9CTr3/fr/wCtXqFc94z8Sr4a0RpYsNezny7WM873PSgDyPxb8XrrxQsei6R4Y1bG/N/GU+by+4FdJp/xYj0vT4bKy8Da6kECBEAi7Cuv8CeHZNI0przUSZNSvm864ZhypP8ACPauqoA8v/4XPJ/0JOvf9+v/AK1H/C55P+hJ17/v1/8AWr1CigDy/wD4XPJ/0JOvf9+v/rUf8Lnk/wChJ17/AL9f/Wr1CigDy/8A4XPJ/wBCTr3/AH6/+tR/wueT/oSde/79f/Wr1CigDy1/jJJJGyHwVrwDDGRFXyd44aGTxfezW9rcWqyyFzFc/fBPrX6B18a/FHwlrWtfEy/mis3S2Z+blhhFHqaAPJq0NDvNQ07WILzR2dbqFgylOtdDc6BoHh7Kazfi/mYZT7G2Qv1qHSWvfFWt2Oi6LapA8koRZI1+bb6k0Ae2fDXxTceOfiXfalfoElgsVjIH94CvXK840Dw5B4L+LMtlb8CXT0Eh/vPjk16QeDXZS+A5qnxCUUUVqZhRRRQAUUUUAFFFFABXlvxj16fwnr/hfXLKLzbiCRgqf3s16lXn/jrSofEnxB8K6Vc58oOxbHY9qzq/Ay4fEU/D/jQwXh1rXfBuuX2pynerGPckQ6jZXWf8Lnk/6EnXv+/X/wBatfwhr13p2rTeFvET/wClQnNrM3AmTsB7gV3NcR1Hl/8AwueT/oSde/79f/Wo/wCFzyf9CTr3/fr/AOtXqFFAHl//AAueT/oSde/79f8A1qP+Fzyf9CTr3/fr/wCtXqFFAHl//C55P+hJ17/v1/8AWrj/ABj8RLi71TTdds/CGsW0+nPlpJI8AqeoNfQFQXtql7Yz20ihllQqQR6igDzC0+OIvLVJ7fwdrcqMPvpFkE96m/4XPJ/0JOvf9+v/AK1XPhtey6Tf6j4Rv3Jk0+QtDI3/AC0Vjnj6V6JQB5f/AMLnk/6EnXv+/X/1qP8Ahc8n/Qk69/36/wDrV6hRQB5VdfHD7JbPNP4O1uJFGdzxYA+tcn4P+Is0Gral4gvPCOsXU+ovlHjjyqKOwrvviTfzalqGm+EbFysmouGldOqKp6H613ljaR2FhDawqFSJAoAHoKAPNv8Ahc8n/Qk69/36/wDrUf8AC55P+hJ17/v1/wDWr1CigDy//hc8n/Qk69/36/8ArVHP8ZpGt5FPgrXQChHMXtXqlRXWfsk2OuxsflQB8Aa9rNynjC+1Cw8+wkklLbc4ZfrUlv411BQ39opHqLHo1yNxFdRq3w38UeK/iBqC2GmTCJ5z+/dfk/Ou90r9nbSfD2mtqXxE1hIIIxuKxNgE+lAHkGmTwXWpw3ej29wuqJIJAsP3Vwf5V7x8MNXu/EviS/1HVABdw4RsV5p4o8e6TLNH4e8BabHZ2rSCL7Zt/evk4616V8N9Hl8LeIP7MuWYzTIHYt1bIzWlL40RP4T1M9aKU9aSu05QooooAKKKKACiiigAooooA8x8f+LbfwV8T9J1S5tpZv8AR9gMYrsI/GPjXXoUk8P6RHAkgyj3I4IrG1ewtdb+M+maTfQrNE1kZCrDOPet+ex8Q+ALlrnSnk1XRCcyWzcyR/7vtXFU+NnVD4URDR/ibqny6vqFhbR+lrwRSr8I57xxNqvijVDJ3jilwtdp4e8T6b4ls/O0+YeYv+thb78Z9CK2KzLOLs/hX4cgUC8ga/I6m4Od31rbs/B/h6wx9i0e
0hx02RgVs0UANSNIkCxqFUdABinUUUAFFFFABWN4p8PQeJdCmsZQBJjdDJ3jfsRWzRQBxXw/8QzXME2hauSmpacfLIf70qD+Ou1rgvH2j3Gn3cPi3RVxd2ePtKjrJF3FdboesW2vaPb6haH5JkDbe6n0NAGhRRRQAUUUUAFYXi7xJF4Z0N7psNcSfJbxd5H9BW1PPFbQPNO4jjQZZmPAFecaLDL4+8YSa5eIRpGnvstIn/icfxigDY8DeFms9Mnv9aQT3+pHzJTKMlVP8H0rY0nwlouh6jNe6TYxWks4w/lrgGtqigBGVXQq4DKRgg96+ePiB4Mm8E66+oWStJo96+5+5ic9c+1fRFVdS0211bT5bK/iWWGVcMrCplFSVmc2Jw0MTSdKpsz5lBVlDIQysMgjvS1ueMfh/qngq6a50qKS/wBHkb7ijLxf/WrnLW9t71SbaQMR95e6n0rgnBwep+eY3L62Dlaauu/QnooxRWZ54UUUYzQAU2SVIImllYKijJJqC6v4LRhG7b5m4SJeSxrtPAnw0vvEtxHq3ieJrbT0OYbRuGc/7XtWkKbmz08DltbGS0Vo9yX4W+B59c1WPxNrMRjtIG/0OFhyx9T7V7mBgYFRwQRWtukFugjjjG1VUcAVJXfGKirI/Q6FCFCmqdNaIKKKKo2CiiigAooooA8v+If/ACVrwF/18vXqFeX/ABD/AOSteAv+vl69QoAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAPL/Fn/Jf/CH/AF6Tf1r1CvL/ABZ/yX/wh/16Tf1r1CgAooooAKKKKACiiigDifG3wv0rxxqlpqN7d3dpdWiGOOS2facGue/4UHpn/Qx61/3/AK9XoqJU4Sd5RT+RSlJbM8o/4UHpg6eI9a/7/wBdb4F+H+m+ArS7h0ye4nN3J5ssk7bmZq6qiiNOEfhSXohOTe7MPxN4XtPElntlzDcx8w3CcMh+vpWFoPie70bUBoHiw+XKPlt7s/dlHYZ9a7msvX/D9j4i09ra+jBP/LOQfeQ+oqxGpRXA6Pr194T1JNB8VEvbscWmoH7pHZWPrXeqwZQykEEZBHegBaKKKACiiigAooooAKp6tpdvrOlz2F4u6KZCp9R7irlFAHn3gbVLnQ9Xn8Iay3zwZayc/wAUXYfWvQa4/wAf+G5NU0+PU9MBTU9PbzYmT70gH8P0rS8IeJI/E2hJc8Lcx/Jcxj/lm/cUAb1FFFAEV1cw2drJc3MgjiiUs7noAK878NW0vjfxXJ4l1FD9gtGMdlC/3WI/5aCpfGV/P4o8QQeEdHY+XxJfSj7vl91+td3p1hBpenQ2VomyGFAiD2oAs0UUUAFFFFABRRRQAUUUUAFfPf7QnhDxBHBJr2i3sq6cFxc26Nj8a961LUrXSbCS8v5VihjGSSa4a1ttR+Id+LvUla18Pxk+TbHhpz6t7UAfD9fUH7Nnw8NlZP4p1KHEs3yQK45UD+Kl8Yfs5wXvjeyvtDKw6ZLKPtUPdR7V7zp2nwaXp0FlaIEhgQIoHoBQB5r8VbV9N1/Q9dtlwBP5d0/+xxXRrKs8azR8pINy/StjxVoEPibw5daZMdvnJhXHVT61wHgfVZriwl0fUFMd7pzGLY3UoOjV0UZdDGoup09FFFdJgFFFFABRRRQAUUUUAKODk9Bya5TwfF/b3xZ1W+kG+2sEX7O3+13q/wCLtbOh6E7248y9n/dwQjq+eDiui+H3hkeG/DEUcvzXNx++lY9ctzj8K560tLG1Na3Dxv4YfXLFLzTmEWqWR8y2l9/SpfBfihPEWl7J1MOoW37u4gb7ykcZ+hrpK8+8X6VdeHNaTxbocZYKQL+BOsqetcxueg0VS0jVbXWtLhvrGQSRSrnI7HuKu0AFFFFABRRRQB5x8RbWTQ9d0vxhaqfLs38u5RP4wxwCa9BtLmO8s4rmFgySoGBHuKr6zpkWr6Pc2U6hllQgA+uOK474X6lL
BbXfhjUmJvtLchie6k8UAd/UV1cJaWc1xIQFiQuSfYZqWuB+KGpzyWtn4b01yt9qbjbj+4DzQBV+Hdu/iDXdS8XXY3pcvstA3/LMDg4r0iqGi6XBo2jW1jaoESJAMD171foAKKKKACgjIwehoooA5vxl4iXwX4YuNTt9OkuigJ8uBe/qa+MPHnxK13x5qLy6nO0dvn5LZDhQPpX3ZetbpYzNeBTAqEyb+mO9fM3ib4Pt8QbzU9e8IQx2dqjHyI8YE2OuKAPM/hB4d/4SX4ladZsMojeax7DbzX0r8RIDofjzQtfUbLZU8iX0JPArj/2b/Al9o2p6rf63ZPbXMDeVGJBgkd8V7P428OR+J/DFxZOP3qjzIT6OORTTs7iaurFA8gN/eAb86Sue8F6xJqWlPZX3Go2B8u4U9uwroa707q5yNWdgooopiCiiigAooooAKVdobLnCjqTSVz3jTV207RTaWuWvL8+RGq9Vz/FSbsrjSvoQfDqH+3/H+r67MCfsDm1iY9wc9K9Y6jBrn/BPh0eGvC9rZOAbjbuncdXY+tdBXA3d3OtKyscV4g8ArNef2t4ZmOnaknzbUOI5T/tCmaB4+Zbv+yvF0H9nagp2iVhiOY/7NdxWVr/hvTfEdkbfUoFc4wkoHzRn1BpDNUEEAg5B70V5ql14h+Hcyx3wfVdBBwki8yxD1b2rvNJ1mw1yxW70y4SeI9Sp6H0NAF6iiigAooooAKKKKAGyRpLG0cqhkYYKnoRXm2nu/wAPfGr6dOx/sXVJN8Lt/BKf4R7V6XWL4r8Ow+JdClspAFlxugkPWN+xFAG1RXGfD/xFNeW0uiasSmp6cfLYP96VR/HXZ0AFFFc/4y8Sp4a0Rpkw13MfLto/779hQBzvjfU7jX9Zh8H6O3MuHvpB/DF6D3rt9L0230jTILGzXbFCgVfU47mud8B+Gn0jTXv9RzJqd8fNmd/vLn+H6V1tABRRRQAUUUUANkjSWNkkUMjDBUjgivPPFXwb0PXpjdafu0256/uPlVj7ivRaKNyZRjJWkro+db/4W+ONI3GGW2vbfPyhB8341gzWHiu1J87w1eShTg+WnWvqiisnRg+h5dTJ8FUd3C3ofLUGk+LrxlWHw9cw7+hkXp9a6HT/AIR+MtWZW1K7t7S1PVV4evoSihUoLoVSynB03dQv6nC+E/hNoHhn968Z1C56+bc/MVPtXdAAAADAFFFanppKKsgooooGFFFFABRRRQAUUUUAeX/EP/krXgL/AK+Xr1CvL/iH/wAla8Bf9fL16hQAUUUUAFFFFABRRRQAUUUUAFFFFABXFal8UNI0fV4tM1GGaC5mYLGjfxZrta82+M3gzT9e8Lvqsji21DTv3tvcDg5HagDp9b8Y2eh3dla3EEsk17/qlTvW/DJ50KyFSm4Z2nqK8f8Agzqp8Zo+o6+6y6lY4jjjbqgHGcV7HQAUUUUAFFFFABRRWJr/AIx0LwyAus6jDbSsMxxufmf2FAHE+KyD+0B4RAOStpNn2616jXnHhOwuvEvjO48W6jbNBAg2WCv97ae9ej0AFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAUNZ0az13TZLLUIg8bjg91PqPeuM0/VtR8Dagmk+IGafSnbbaXnXYOwY16FVTU9MtdXsJLS+iWWJxjBHQ+o96ALMciTRrJEwdGGQwPBFOrzqG41D4dXwt71pLzQZW+SY8tB9favQLW6gvbZLi1lWWGQZR1PBFAEtFFFABRRRQAUUUUAFea6zDJ4B8Yx63aITpOoP5d1EnRHP8Zr0qqeraXbazpc9heLuimQqfUe4oAswzR3MCTQOJI3GVZehFc/418TDw3o26H5r25PlWqernpXOeENcfwtd3nhnxHMI1s1MlrM/Qw9s+9J4ZtJvGniuXxNqcZ+w2xMdlC4+VsfxigDd8B+GW0LSWub7L6let51w7dVJ/hHtXVUUUAFFFFABRRRQAUUUUAFZ2t65ZaBp7XeoShFHC
r3c+gqLxF4jsvDmnm4vG3SNxFAv3pW9BXMaJ4dvvEuoLr3ixcL1trE/djHYketAEWn6PqHjjUU1bxCHg0yNs21l03+7D0r0CONIYljiUIijCqBgAUqqEUKoAAGAB2paACiiigArz/wAd+GLmG7XxN4eQ/bIObiJP+WyelegUU07O6E1c8+0TXLXXrET2rgSrxLEeqN3FaNUfE/gKUXja14TcWl+vzND0jl+o9awrDxzHHP8AY/FNrJpN0Dt3zDCyH2rrhUUtznlBrY6uimwSxXUIltpUkjPRgafg1qZiUUuDSOVijMkjqqL1JI4oAKrajqVppFk11fzLFGo4z3PpWFqnjmxtpDbaNE+rXp48m3GSp96u6B4Hv9cvI9Y8aMHxhobJfuqP9oVnOoomkYNkfg7QrrxPrI8Ua5E0dvE3+gW79R/tEe9em01EWONUjUKqjAA6AU6uNtt3Z0JW0CmyRrLG0cihkYYIPcU6ikM81Qy/DnxWEfJ0DUX+U/w2z98/WvSUdZI1dGDKwyCO4qjrej22u6TNYXqBo5B+R7GuQ8GaxdaNqknhPX3PnwnNpO3SZewH0oA76iiigAooooAK808Zxt4S8aWHiqH5LKVvL1Ajvnha9LrI8UaHB4i8O3Wn3Kblddyj/aHI/WgDS+0x/YvtWf3Xl+Zn2xmvOvBkTeK/G+o+J7keZZwt5ensf4ezVzreM7//AIQo+FBIT4jWTyTH38vOP5V6v4Z0WDw/4etdPtk2KiAsP9o8n9aANWiiigAooooAKKK4/wAc+JZ7KOPRdF/earffKijnYp4JoAyvEl9ceNPEH/CMaTIyWULA386dsfw59672wsLfTLCKzs4lihiXaqqMCsrwl4ag8M6Mlsn7y4f555m+87H1NbtACBFViyqAT1IHWloooA858b+HbnStTXxR4fj/AHq/8fcCD/WjuxqzpGsWeuWIurCUOOjr3Q+ld4yh1KsAVIwQe4rz3xJ4DurK8bWvBjrbXQ5ktT/q5B349a2p1OXR7Gc4c2qNOiuX03xzatN9j8QQSaTeDjE4wHPqK6eJ0niEsMiujdCCOa6k09jnaa3FopcGjBpiEopJXSGIyTOqIOpJFcvqPjq0WX7L4ehfV7wnaVgGfLPvSbS3Gk3sburavaaJYPd30gUAfIndz6Cqngrw5c6zqh8VeIYyrkYs7dv4E9SPWn+G/At5qF+mteM3Weccw2g+5GPcetehqoRQqgAAYAHauWpU5tFsdEIW1YtFFFYmgUUUUANliSaJopUDo4wysMgiuC1fwPe6PfnWPBE4tphzJZsf3Tjvgetd/RQByXhnx5a6xOdP1OF9O1NODBNxv919q62uf8TeDtO8TQ5nDQXS/cuoeHX8a5iz8Sa14Jul07xbGbnTxxDqEY4Rf9s+tAHo9FQWV7bajaJc2MyTwyDKuhyDU9ABRRRQAUUUUAcD490i50y+h8XaIuLm1wLpR/y0i7j611+i6vb65pEGoWhzHMgbb3U+hq7JGk0bRyqHRhgqRwRXm2nPJ8PvGz6ZMx/sXVH3QO38Ep/hHtQB6Nc3MNpbSXFzII4o13O7dAK878P20vjrxVJ4h1CMjTbN/Ls4X6MR/GKm8ZahceJ9eh8IaO2EOJL6UH5fL7rn1rudN0+30rTYLK0XbDAgRR7CgC1RRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAeX/ABD/AOSteAv+vl69Qry/4h/8la8Bf9fL16hQAUUUUAFFFcF4h+K9h4b1+LSr/TLvzZmCxuB8r/SgDvaKo3Op/Z9FbUPs8jhY/M8pfvYxmsvwh4wh8YWUl3a2U9tCjFQ0w+8R1xQB0VFFFABRRRQAV55eT6j4r8bRabeaTcQaRa/NI8g+SY16HRQB4d420PXfBvxMsfEHgXSZZ7ecBbyCFfkI+lbnii8vb7xf4XdJrqwW/Uie2LYxXqtcp4k8KXOs+JtL1WCdIzp5yqnv
nrQBxtro1zD4/v8Aw8NXvXsbtdxLSfMh/wBk1tfDa8u49Z13RJrmS5t9PlCwySnLEH1rVXwper46bXftEfllNuzHOaf4X8KXOha7qmozzpIdQfcwXtQB1dFFFABXO+KvA2heMYVTWrNZXQfJIPvL9DXRUUAec+DtRuPDPiqbwZqE5ngA3ae7/e2DqDXo1eXeKgF/aA8JFBtLWs27HfrXqNABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBDd2kF9avb3cSyxSDDIwyDXASR6h8N78zW6yXvh+ZvnjHLWx9R/s16LTJYo54XimQPG4wysMgigCKxv7bUrOO6sZlmhkGVZTVivO7yw1H4fag9/oyNdaHK2bi1HLRH1X2rt9J1a01rT47ywlEkbjt1B9DQBdooooAKKKKACiiub8beJh4c0b/R/mv7o+Vap6uelAHAfFm3l8Y6imjeHI919ZL588yfxqP+Wea77wFq1nqfhW2SzAje1UQzRDjy3HUVH4F8MnQdJM95l9SvT51y7dQx7D2rntahk8A+MY9bs0J0nUH8u6iTojn+M0AelUUyGaO4gSaBw8bjcrL0Ip9ABRRRQAUUUUAFYfibxRaeG7IPKDNcyfLDbp96Rqg8U+LoNBRbW3X7TqU3ENunJ+p9qoeGfCU320674lf7TqUvKIeVhHbA9aAIvDvhi91LUR4h8XES3jc29t/BAvbj1rt6KKACiiigAooooAKKKKACs3WPD2l69B5eqWcU+BhWZeV+hrSooA8wvPg8lrK114d1e7t5z0ikkzGPwqk3hD4m2vOn6zpzMevmqTxXrlFUpSWzE4pnkY8KfFG741DWNNVV+75SkVZtvhHdamyzeJ9buXkU/6u2fah+tep0UOUnuxKKRi6F4S0fw6g/s6zjSXGDMR87fU1tUUVJQUUUUAFFFFABXMeNvDDa9p63Fiwi1O0+e3l7gjt+NdPRQBzPgrxOPEGlmK6Uw6ja/u7iBvvAjjP4101cD4w0i70LVl8WaDGWeM/wCmQJ1mX1/Cuv0XWLXXdJh1CxkDxSjt2PcUAX6KKKACkZgiFmOFUZJ9KWuQ+JXiCXQ/Ckq2Pz3t0whijHUhuCRQB5Qtwi/tBv4mMIOkf6oT44LdK+hUYOispyGGRXBN4Btv+FXDRgT5qp9o3/xb/vfzrR+HHiCTXfCkQvfkvbYmKWM9RjgE0AdbRRRQAUUVDeXcNjZy3Ny4SOJSzEn0oAzPFHiK38NaJJe3By/3Yk7s56frWH4G8O3CPL4i14b9Vvjuw3/LFewX0rN0K1m8eeJD4g1NT/ZVqxWxgYcP/tEV6OBgYHSgAooooAKKKKACiiigDJ1vwxpPiCIrqdnHK+MLIV+Zfoa4S6+EEmnu1z4Z1m6imY58ueTKD6CvUaKabWwrXPIz4T+KFr/x4axppB6+apNA8J/E+6/4/wDWNNXHTylIr1yiq55dxcsex5dbfB9791uPEmtXcswPMcEmEP4V3ei+GNI8PxBdLsooXxhpAvzN9TWtRUtt7lWsFFFFIAooooAKKKKACiiigAqG7s7e/tmt72FJ4X4ZHGQamooA85vfDGteDbl9R8HSmezJ3TWEhzx6IK6Xwz4z07xKnlxE216g/eWkvDpXQ1yfifwJZ60323T3On6nGdyTw/LvPo3qKAOsorz/AEvxvfaHfLpHjeAwSZ2pfgfupPb6130UqTRLJEwdGGVYHgigB1FFFABXFfFKWy/4RU286hr6Y7bHH3hL2IrsLu6hsbSW5upBHDEpZ3PQAV554XtZvGniqTxRqUZFlbExWULjhsf8tBQBT+CKiz0e5sdYDprySFrgT/6xh6j2r1SuA8eaTc6VqMPi/Rh/pFrgXaD/AJaRDt9a7HRtWt9c0mC/szmOZA2O6+xoAvUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHF/EHw3c6mtjrOljdqGkuZYV/veopPC3xQ0L
xFcx6c832XVSdjWkn3tw64qH4ka7dW8mmeHtOkMN1rMhiSRTyoHWtXw94C0Pw+I54bKOW+UZa7kXMhbuc0AdNRRRQAV518ZvDA1jwkdTtUY6hphE0JTrxyRXotMmiSeB4pQGR1KsD3BoA8stfHc3iX4WWT6dKo1a7Ih8peoxw1ejaFpkOkaNb2kCbFVAWH+0ev615F4A8Btpvxe1eeKfOm2LbrdB0y3Wu5+KKTr4Qe5s7qW2mjkQK0bYzk80AdpRXkOtWF94R1vQ9W07VLmZbkKtxBM+5WyByBXrcT+ZCjnqyg0APooooAKKKKACiiigAooooAKKKKACiiigDy/xZ/yX/wAIf9ek39a9Qry/xZ/yX/wh/wBek39a9QoAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigBHRZEZJFDKwwQRwRXAaroWoeENRfWvDIMlox3XNl1GO5UV6BQQCCCMg9RQBmaDr9l4h05buwfPZ4z96M+hrTrhde8NXmh6i3iDwkNso5uLMfdlXvgetdD4b8S2XiSw861OyZOJoG+9E3oaANmiiigCG8u4bCzlurpxHDCpZ2PYCvPvC1pN4z8Uy+KdSjP2O3JisYnHysB0cCneLr6fxZ4kg8J6S+IFIkvph93b3T613thYwabYQ2dogSGFQqKOwoAsVT1bS7bWdLnsL1d0M6FW9R9KuUUAefeBtUudD1afwhrTfvIMvZuejRdh9a9BrkPH/huTVNPj1PTAV1PT286Ip1kx/CfatHwh4kj8TaElzkC5j/d3MY/gcdRQBvUUUUAFcr4q8XHS5F0zR4jeavPwkSc+V/tN7VD4n8WzRXY0Tw3H9q1WbjcOVgH95queFfCUWgRtc3T/AGrU5+Z7huTnuB7UAQ+FvCA01zqWsOLvVZvmeVufL9lrqqKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAR0WRGRxlWGCD3FebN5nw38VhsH/hH9RfBI+7bN/9evSqo6zpNtrmkzWF6geKVcYPY9jQBcR1kjV42DKwBUjuKdXAeDdWutB1iTwlr7nzE5sp36TL6D6V39AASACScAda8ztwfGfxYkmk+ax0HiIj7sjH+ddN4+8QHw74TubmLDTyfuo07ktxxSfD/wAP/wDCP+E7eGT5ribMsrnqS3ODQB0+BjGOPSvM5c+C/iyjJxY6/wAyE/diYfyzXplct8QvD/8Ab/hSdIvluIMTRuv3ht5wKAOpBBAI5Borm/AfiD/hIvCdtdSYWZR5cidwV45rpKAAkAEk4ArzjW7qfx94kPh/T3ZNJtHBvpl/jI6AH0rS8b+IroSR+HfD536ne/KzjkRJ3z74rd8MeHLXwzosVlbAs+Myyt952PUk0AaVpaQWNpHbWsaxxRqFVVGABU1FFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAFLVdIsdasmtdSt0njPQMM7T6j3rgnsPEHw7uDLpRk1XQycvbt80sf0PpXpVBAIIIyD1FAGR4f8Tab4lsvP02dWZeJIj96M+hFa9cRr/gJje/2t4TnGnakvO1eIpD/tCsS9+KV3puntpOrWTWmvyfuYCwwkr9mHtQBd8W30/i3xJD4U0l8QIRJfTD7pTun1rvbCxg02whs7RAkMKhUUdhWB4H8MDQNIMt0fM1G8PnXMh5+Y9h7V09ADZYkmiaOVQ6MMMpHBFebaZJJ8PfGbaVcMf7F1N91vI38Ep/hHtXpdYnizw7F4l0KWzcBZwN0EneN+xFAG3RXG/D7xFLf2Mmk6qSmpaefKdX+9Io6NXZUAFFFFABRRRQAUUUUAFFFGR60AFFGRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHl/xD/5K34B/wCvl69Qry/4h/8AJWvAX/Xy9eoUAFFFFABXl3jLXPiPb+L/ALB4b06GXS5QFE7Jkrnq
c16jRQBgeD9Ak0DQ0gu5BNduS80v94ntWJ8X5ok8ATxyTeU8ksew++a7qq95YWmoRiO9t450ByFkXIzQBymm+GbnVn0u+16eOdLOJTAkY4PA5NdmAAABwBTURY0VI1CqowAOgFOoAKKKKACiiigAooooAKKKKACiiigAooooA8v8Wf8AJf8Awh/16Tf1r1CvL/Fn/Jf/AAh/16Tf1r1CgAooooAKKKKACiiigApkzMkDtGu5gpIHqafRQBwngPxR4l1vWdVtvEOktZwW8pW3k243Cu7oxRQAUUUUAFFFFABRRRQAUUUUAFcV4k8K3Vrf/wDCQeFSIL9OZoB92de/HrXa0UAYHhfxXbeIrYqVNvexcTW78MpqDxx4m/4R7Rttt81/dnybZR/fPQn2qj4x8NpEH8RaXOLG9tFMjsOFkA5Oa5P4c6mPHviVtc8QOq3dqpjtrRhhWX/noBQB3Pgbwz/YGj+ZefPqN2fNupD/AHj2HtXT0UUAFFFFABXmuswSeAfGMet2aE6TqD+XdRJ0jc/xmvSqpavpltrGlXFjfKDBMhVvb3oAtQzx3ECTQOHjcblYdCK4vxD4qutQvzoPhPEt23yzXA5WEd/xrh9D8S6nc6jc/D7TrsEwSFRe9li/ug+teseHvDll4csBBZplzzJM33nPqTQBB4X8K2vhy0OCZ7yX5prh+WY9+fSt6iigAooooAKKKKACiiigAooooAKKKKACiiigAoPSiigDgdA8VeJ734iahpOp6Q0OlxDMNxt+9+Nd9RjnNFABRRRQAUUUUAFFFFABRRRQAUUUUAcz418L/wDCQaasto3lajanfbTDqCOcfjSeCfE/9u2D2t6ph1Oz+S4gbqMcZ/GunrzL4owP4Ut5fGmkuIZ4EKToP+W2eAT64oAL8nxl8VobNfnsdE+aZf4XY9M16aAFAAGAOAK4L4Q28MngqHVt4mu9QJkml7k56V3tABSEBlKsMgjBFLRQB5npZPg34q3OnyfLZa2d9sv8KEda6/xb4lh8NaM9ww8y4f5IIl6sx6fhWH8V9NE3hJ9UgdYbzT2Escp6gA5IH1rJ8CRy/EC4g8XatzaxL5dlbnsRwWYUAb/gbw1NYxS6zrR83Vr875GPRB2A9K7Cjp0ooAKKKKACiiigAooooAKKKKACiiigAPArgPCvirxRqnjzVNN1bSTb6ZbsRBcbcb67+jAHQUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBBfXsGnWM15duI4YULux7AV5hpvhmP4lavceItdieO2CmGxC8Hb2cVf8U3k/jLxRD4X0pz9jgIkvpx90gdY69Bs7OGwsorS1QJDCoVFHYUAedpfa/8ADuZYNSSXVdEJws68vCPVjXe6Vq9jrVkt3plwlxC3RlNWpYY54WimRZI3GGVhkEVwWq+Cb/Q75tX8Dzi3l6y2b8xuvoo9aAPQKK5Pwz47tNZm/s/UUOn6qnDW03Bb3FdZQB5/460q40fU4PF2jKRLb4F4i/xxf412ekarb61pUF/aNmOZQ2O49jVqWKOeFopkDo4wykcEV5xpMsnw/wDGL6RdMf7I1J99tI3RJD/DQB6VRRRQAUUVT1XVbTRtOlvtQmWGCJcszGgC1JIkUbSSMFRRksTgCvNvFPxp0bR5Ta6NE+r3H3WMH3Yz71594w8eap45umgs3l0/R42+XacPL759KwrW0gslxbRhCerY5b61zzrKOiPnsdndPDt06S5pfgbeo/EnxzrZdY5beytG6Kq4cfjWHcXXiS9INzrlwpHTy3IqcnPWiud1ZvqfOVM5xs38VvQit77xNZLtttbnZf8Apo+a6DTfip410Z0XUBb39onZF+c/jWJQCR0pqtNdR0s6xlN6yv6nsnhP4vaF4kZbe7zpd4x2rBcHBc+1d+CCAQcg9xXypdWNveENIu2UfdlXhl+hrrvA3xJvvClzHpfiF3u9Mc7Y7knLRH/aPpXRCspaM+lwGc0s
S1TqLll+DPfaKitrmG8to7i2kWSKQblZTkEVLW57wUUUUAFFFFABRRRQB5f8Q/8AkrXgL/r5evUK8v8AiH/yVrwF/wBfL16hQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAeX+LP+S/+EP8Ar0m/rXqFeX+LP+S/+EP+vSb+teoUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFcx458Tf8I/o2y1+a/uz5Nso7MehPtQBgeK72fxf4mg8LaQ5FtEfMvp1+7gdUrY13wLa3NjbtoeLC+sh/o8kfGcdm9RVjwP4Z/wCEe0bddfPqF2fNupD3c+ldNQByfhbxc9/O2ka7H9k1eDhkbgTD+8vtXWVz3irwnB4hgWaFvs2owfNBcpwQfQ+orO8NeLLhb06F4nT7PqUXCyHhZx6igDsqKKjuLiK1t3nuJFjijG5mY8AUAOd1jQvIwVVGST0FcBqes6h411B9G8Ns0GnocXd92Yd1U+tR3V7qPxCvmstLZ7XQ42xNcdDN7LXc6VpVpo2nx2dhEI4oxjgcn3PvQBxviXwHBa+HYJvDkRi1DTj50bp9+cjsx710Hg/xJH4m0JLkEC5j/d3Mf9xx1Fb1ea61DJ4B8Yx63ZoTpOoOI7qJOkbH+M0AelUUyCaO5gSaBw8bjcrDoRT6ACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArzPxMx8YfEWz8PxfvdPsctqMfY56ZrufEesQaFoF1f3L7FjQgH/aPT9a5j4X6PPBosus6ohGpak5eVz3XPy0AZ0iP8NvE6SRgjw9fMFYD7ts3b869JjkWWJZI2DI4DKR3Bqpq+lW2taXNY3sYeKVcYI6Hsa4zwfqlz4c1l/CWuyEkEmxnf/lqvpn2oA9ApGYIpZjgAZJPalrhvGuvXV7eR+FvDrZvrrieYdIE759zQBn6nJL8R/Ecmj27MugWZxdSKf8AXt6A0zwmx8HePbzw3OTHZXeG01OwA613Hh3QbXw5o8VhZrwgyzHqzdzXL/E/SJ30+21/S0J1DTHDKR2TPzUAd5RWdoGrwa7odtqFq++OVBz79/1rRoAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACuW8d+JjoGjiGz+bUb0+TbKOzHufaug1C/g0zT5r27cJDChdyfSuE8H2E/inxBN4t1dD5QzHYxMPl2dmx60Ab/gjwyPDuijz/nv7o+bdSHu5610tFFABRRRQBzvibwZpviSPfIpt7xeUuYvlcH3PpXN2XifWfBl0um+MImuLLIEWpRj5VXsG969GqC8srbULVre9hSaJ+qOMigBbS7t761S4tJVlikGVdTkEVk+LfDkXiXQ5LVsLcL81vL/zzfsa5O78Na14IuH1DwjI93Yk7prCQ7ifZPSqXiz432Gh+FDfWtnI9+fla2brC3+1QBseFvHNrZ6HNbeKrlLK704mOQzHBkUfxUeCfixpHjvxHfaZo6MVtF3eaTw49RXx14t8a6t4x1qXUdTl2vKMFI+Bj0rtv2eNaGk/EqOItgXieVj1oA+yndYo2eRgqqMkntXzv4+8Xz+N9feytnZNHsn2lR/y1cdc+or0P4z+KZNC8LJp9pn7Rqj+QGU8oD3rxyztRZWiQA5YD5m/vH1rnrT5VZHz+d454ekqUH70vyJwAqhVGFHAA7UUUVxHwgUUUUAFFFFABTZI0miaKVQyMMEGnUUBsdZ8LvGs/h7WIvDWqymSyumxaSMeUPpn0r3evlHUrZrm0zCxSeI743HVSOa9/wDhl4p/4SvwVbXbgiaL9zID1yvGa76M+ZWZ+gZNjniqPLP4onX0UUVse2FFFFABRRRQB5f8Q/8AkrXgL/r5evUK8v8AiH/yVrwF/wBfL16hQAUUUUAFFFFA
BRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAeX+LP+S/+EP8Ar0m/rXqFeX+LP+S/+EP+vSb+teoUAFFFFABRRRQAUUUUAFFFFABRTEmjkZljkVivDBTnFPoAKKKKACiiigAooooAKKKKAIL69g06xmu7pwkMKF3Y9gK4DwlZTeLvE03izVEP2aPMVhGw+Up/fx60nim7m8Z+JovC+lORaQMJL6dfukDqleg2dnBYWUVraoI4Yl2oo7CgCaiiigArE8S+F7TxHZbJv3VzHzFOnDKfr6Vt1U1PVLTR7CS7v5ViiQZJPegDi9L8XT+F5W0rxvKsKxD9zfNwjr2H1qErqHxG1DLCSz8Pwtlexuff6VTvPD0vxcJk16BrXQYz/o8WMSSH+/n0rQ0TWbnwVexeHvEQCWPCWN4B8uOyn3oA7qysrfT7RLaziWKJBgKoxU9IrBlDKQQehFLQAVT1bS7bWdLnsL1N0M6lW9RVyigDz7wNqlzoerz+ENafMkHz2ch6NF2H1r0GuQ8f+G5dUsItT0sFdT09vOhKcGTH8J9q0fB/iSLxLoSXIIFzH+7uI+myQdRQBvUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFME0bSGNZFLjqobkfhT6ACiiigAooooAKKKKACiiigAooqnq2oQ6VpVxeXEiokSE5b1xwKAOB8cyv4m8Z6X4Wtz5lmG8y/C/wgcjNejwxJBBHDGMJGoVR7CuB+F2nzXUN74o1JCt9qchBDdkB4xXoNABXOeM/DCeI9J/cnyr63O+3mX7ykc4z710dU9W1S10bTJr6+kEcUS5ye57CgDgbf4lG38NS2d5ET4jh/c/Yh94noG+net/wR4Xk0e0fUNUYTate/PPJ6Z6AV50+havfay3xMitQJoz+6sivzGMdWP4V6/oOt2viDR4dQsnDpIOR/dbuKANGo7mBLq1lgkGUlQo30IxUlFAHm/gGdvDPiTUfCN0dkEbl7Et/GDycV6RXnnxNsZdOudO8XWCF7nTXCFAPvKx5JrudNvotS02C7t3DpKgbI+lAFqiiigAooooAKKKKACiiigAooooAKKKYk8UjsiSIzL1UMCRQA+iiigAooooAKKKKACiiigAooooAKKKKACiiuT8eeJm0XS1s7AGTUr4+TAq9UJ/iPtQBh+IrqXxv4sj8N6c5/s61PmXs6/dYj+CvQ7W1hsrSK2tkCRRKFRR2FYfgzwyvhrRBHKQ97OfMupP77nrXQ0AFFFFABRRRQAUUUUAIzBELMcADJNfGfx28Z22v+MpbTSLcW9vbEpIyrt81+5NfZpAYEEZB6iuV8QfDbwt4liZNQ0uEFurxKFY/jQB8DVv+BNQ/srx3pF7u2iG5Via+hPE37Lum3ILeGr5rVuu2YlhXlHiH4F+MvDU5khtReRxnPmxMOMd+tAHpXxJ1Y+IPiPAqNus4rRZEH+1WSTk5rH0m8lv75ZLlSs0MAicH1FbFcFZ3mz89zuo542S7WQUUUVieMFFFFABRRRQAUUUUAA9PXiuu+B2oSWfizV9FZv3GwSRjtnvXIjrWr8OLj7L8Vo1zjz1C/Wt6DtM97Iajji+Xuj6OoooruPvQooooAKKKKAPL/iH/wAla8Bf9fL16hXl/wAQ/wDkrXgL/r5evUKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDy/xZ/yX/wh/wBek39a9Qry/wAWf8l/8If9ek39a9QoAKKKKACiiigAooooAKZNGZYHjB2llIyO1PooA4fwN4BuvCmsape3eqy3ovZC6I7EiMeldxRRQAUUUUAFFFFABRRRQAVy/jrxKdB0fybT59QvD5Nui9QT/F9K6C/vYdNsJry6cJDChdifQVwXhGxn8WeJJvFuqofITMdhGw42f3setAHQ+CfDQ8O6L/pHz39yfNupD3c10lFFABRRWR4i8SWPhvTzcXr5dvlihX70jdgK
AJtc1yy0DTnu7+QKBwq93PoK47TdG1HxrqCat4jDQ6ajbray6Z9C1TaJ4bvfEWpL4g8WA5621kfuxr2yPWu7VQqhVAAHAA7UAJHGkUaxxKERRgKowAKp6xo1lrmnvZ6hCJI3HBxyp9Qexq9RQB55p+q6h4D1BNK14vcaU5xbXvXZ6K1egRSxzxLLC4dGGVYHgioNR0211WxktL6JZYpBggjp7iuDin1L4dXwhu2e70CVsJJ1a3+vtQB6NRUVrdQ3ltHcWsiyxSDcrKeCKloAK811mCTwD4yj1uzQnSdQYR3US9ImPVzXpVU9W0u21rS57C9TdDOu1vUUAWYJ47mBJoHDxyAMrDoRT68+8D6pc6Jq8/hDWnzLB89pIejRdh9a9BoAKKKKACiiigAooooAKKKKACiiigAoPIoooA4TQfh9d6T8QL/xDPq008NyMJbFjhK7uiigAooooAKKKKACiiigAooooAK84+I9zJrmsaX4RtWJjvZN10ydYwDkZr0C9uo7KxmuZmCpEhYk+wrgPhrayazqOpeLr1CJL5zHErD7iqeooA9As7aOzsobaIAJEgQAD0GKmoooAR3WONnchVUZJPYV5vI0vxI8UGGMkeHtPf527XLj0+lW/GWsXWt6onhPw+5E8p/0u4XpAvofrXXaHo1roOkw2FkgWOMc47nuaALqQRx24gVAIwu0L2x6V5xdJJ8OfFn2yIH+wdSfEwA+W3bt+delVU1TTLbV9Nmsr2MSRSrggjp70AWYpUmiSWJgyOAykdwadXnvhTUrnwtrx8J61IxhYk6fO/8Ay0X0zXoVAFbULKPUdOntJgCk0ZQ57ZGM1wnw0vZNKvdS8IXZOdMk/cu3WRSc8V6JXm/xFgbw5rmm+MbVTtt3ENxGv8YY4yaAPSKKitp1ubWKeMgrIgYEe4qWgAooooAKKKKACiiigAooooACMjFcH4X+H15oHjnU9dn1eW5hvGJS3ZjhK7yigAooooAKKKKACiiigAooooAKKKKACiiigCrqeo2+k6bPfXjhIYELMTXDeC9NuPEevT+L9ZQ/PmOwjYcCLs2PWoNenk8eeME0Cyc/2VYsJLuZejOP4D616NBBFa26QQIEjjXaqqOAKAJKKKKACiiigAooooAKKKKAGuxSNmC7iBnA714p4n/aU0fQdRuNPh0u4nuYGKNzgAivYNU1ex0Wye71K4SCFOrMa+RfjdpP9qa03ijQ9Ikg0mb5WuNuBI396gDQ1v8Aab8U3bldHjhtIz/eQE1f+G1l43+LOuC91rVbqDRoT+9KMVEp/uiuA+Fvw0vfiBr6R7Gj0+I5nmxxj0Ffa2haHY+HdGg03TIVighUKAoxn3NAHzbrOmxaL4/vdOgBEaLlc9SKK6P4x6Y+k/EO11rG2G9iEHsWrnSMEiuCsrTPz7PKbhjG+j1EooorE8UKKKKACiiigAooooAB1rZ+F9sLv4qOxGfs6BvpWN0BPoM12/wG0prm+1XxI3KXH7lPbaa3oK87n0GQU3LFOfRI9soooruPuwooooAKKKKAPL/iH/yVrwF/18vXqFeX/EP/AJK14C/6+Xr1CgAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA8v8AFn/Jf/CH/XpN/WvUK8v8Wf8AJf8Awh/16Tf1r1CgAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKK5Xx54mOhaQLezBk1C9PkwIvVSeN30FAGF4lu5vGviqPwzpjn7BbHzL2demR/BXoNpaQ2NpFa2qCOGJdqKOwrC8FeGR4b0ULOQ9/cnzLqX+89dHQAUUVzfinxbFoaLaWifatSn4hgTk/U0AT+JvFNp4btAzgz3UnENsn3pG9KxvDnhe71DUP+Eh8Wfvbx+YLVuVtx6Y9al8MeEp1uzrfiVxdanJyqnlYR6AetdjQAUUUUAFFFFABUV1awXts9vdRrLE4wysMg1LRQB51JBffDe/M
9qJLvw7M2ZUPLWx9fpXe2F/banZR3djMs0Eo3KympZYo54WimRZI3GGVhkEV59e6fqHw/wBQfUdFje60aVs3FoOTD6sPagD0SiqWk6vZ61p6XlhKJI3HbqD6GrtAHIeP/DcuqWEWp6WCuqac3nQlOC+P4T7Vo+D/ABJF4m0JLkEC4j/d3Cf3JB1Fb1ea6zBJ4C8ZR63ZoTpOoMI7qJekTd3NAHpVFMgnjuYEmgcPHIoZWHQin0AFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFMnlWCCSaQ4WNSxPsBQBwHxPv5b37B4WsnIuNUkAYr1VQa7fS7CLTNLt7OBAiRRhcD1xya4DwJC3ibxbqfiy4G+1ZvKst38G04OK9KoAK5jxr4o/sHTlt7NfP1O7/d20C9STxn8K19c1m00HSJtQvpAkca8Z/iPYVyXgvRrvV9Sk8Wa/GVnn/49YH/5Yp2I+tAGv4K8LnQNOaa8bztSuvnuJj1JPOPwrpqKKACiiigDnvGPhmPxJpBRD5d5B89vMv3lYc4z71U8DeJpNXtJNO1QeVqtidk8Z4J9DXWVwvjfQ7qyvYvFOgIfttr/AK+NP+Wyd8+uBQB3VUNb0uHWdFubG4QOssZCg9mxwfzqPw/rlr4h0aHULJwyuMMO6t3FadAHBfC/VJls7zw5qDl73SZCjs3cE8V3teaeMYz4S8dad4mgBjsZ28q+2/xsThc16TG6yxLIhyrAEH60AOooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK5Dx94kfS9OTTdNBk1K/PlRKvVAf4q6PVtUt9G0qe/u2CxQIWPPX2rivBGl3Gu6tN4v1tSZJsrYoRwkX09aAOj8IeG4/DWhpbnD3Unz3Mv99+5reoooAKKKKACiiigAoorG8Q+KtK8NWol1K4VWbiOMcs59MUAbBYKMsQAO5ride+IsMN4dL8M2z6tqWdrJF0i9yayvK8UfEJz9oD6LojHmM8SSj1BrttB8M6X4dtVh023VWAwZWGXb6mgDlNL8A3urXq6r44vPtlwCDHbRnEaD0I711Ou+GdO17wzcaHcQRraTJs2qoAX6VsUUAYfhPwlpfg3RI9M0eERxoPmbux9TW5RRQBxfxR8JL4r8IyLEhe9s8zWuP74rwbTrlrm12zDFxCfLmU9Qw619W14b8U/A8uh37eJNEhLWsh/wBLhQfd9WrGtDmV1ueJnGAeKo80Pij/AFY46io4J47mFZoGDIwyKkrgPz9pp2YUUUUAFFFFABRRUN1dRWcJklPsqjqTTHGLk7R3Ir4zzy2+m2IJu71wkYH619H+CvDUHhTwta6bAu0qu+T3c9a4P4TeBJY2HiXXocXUvNvE4/1Y7GvW676UORan6JlWB+qUPe+J7/5BRRRWp6wUUUUAFFFFAHl/xD/5K14C/wCvl69Qry/4h/8AJWvAX/Xy9eoUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHl/iz/AJL/AOEP+vSb+teoV5f4s/5L/wCEP+vSb+teoUAFFFFABRRRQAUUUUAFFFMnl8mCSUjOxS2B3xQA+iuN8GfEKHxhqeoWcWnz2psnKFpBw/0rsqACiiigAooooArahfQaZp897dOEhgQuxPoK4PwdYz+KvEM3i/VkJiGY9PjYceX/AHsetR+I7mXxx4sj8Nac5/s60YSXs69Nw/gNeiW1tDZ20dvbRiOKNdqqo4AoAloorj/E/i6aK7GieG4vteqTcEryIB/eNAEvirxd/Zsq6Toyfa9YnGI4l58sf3jS+FfB66U7alqsn2vVZ/mklbkIfRfSp/CvhKHQIWuLl/tWpzndPctySfQegro6ACiiigAooooAKKKKACiiigApHRZEKOoZWGCCMg0tFAHn+q6DqHhHUX1rwuC9ox3XVj1yO5Wus0DxBZeItOW6sXyekkZ+8jdw
a1CAQQRkHtXDa94YvNF1FvEHhEbZhzc2Y+7MvfA9aAO5qnq2l22taXPYXyb4Z12sKpeG/E1n4l0/z7Y7JkO2aBuGjbuK2aAPPvA2qXWiatP4Q1t8ywfPaSHgNH2X616DXIeP/Dcuq2EWp6WCuqac3mwFeC+P4T7Vo+D/ABJF4m0KO5UgXEf7u4ToVcdRQBvUUUUAFFFFABRRRQAUUUUAFFFBOATQAUVxWh/EaHW/G154dTTp4nteszD5W+ldrQAUUUUAFFFFABRRRQAVwnxR1aZNJg0LTJCupao4SHHoD836V3LuscbOxwqgkmvNfCqN4u+ImoeIZh5unWZEdgx7MOGoA7rw/pMOiaFa2NvGIxGg3Af3scn860JZEhiaSVgqICzE9gKdXn3izU7nxRrY8J6G7BAQb+4T/lkvpmgCtbrJ8R/FP2qQH+wNPfEQP3bhh/hXpKIqIqIAFUYAHYVU0rTLbR9MhsbOMJFEoGAOp7mrlABRRRQAUUUUAFIyhlKsAQRgg96WigDza+il+HXic6lbq39hX7gXCgcQMemB2r0aGaO4gSaFg8bqGVgeoqHUdPt9U0+azvI1kilUqQwz+NcJ4Vv7jwh4gPhPV5GNtISdPmf+JfTNAHW+KNFh1/w5dWM6b9yFox/tgfL+tYHww1qa98PtpmpPnUtOYxTqeo54/Su2rzPWgfB3xQtdUj/d6dquVvH7B+goA9MopFYMoZTkEZBpaACiiigAooooAKKKKACig8CuK8PfEeHxB41v/D0enTwvZEgzOPlb6UAdrRRRQAUUUUAFFFFABRRRQAUUVxvj/wARy2FkmkaUDLql+fLRV6xqer/hQBj6vLJ8QfF66NZs39kaa++6lXo0g/g969HhhjghSKFAkaDaqqMACsbwl4ci8M6HHaAh7hhuuJR/y0fua3KACiiigAooooAKjuLmG0gaa5kWONBksxwBXNeJfHum6BJ9khBvtSYfu7SHkt+Pauft/C2veNZ0vfGE72lhndHp0Z2sv+8e9AE2o+OtR8QXT6b4EtmnOdragy/u4j9Kv+Hvh3bWV1/aevzHVNTfl3k5jB9lPSuq0/TLPSrVbewt44I1GMIuM/WrVACABVCqAAOAB2paKKACiiigAooooAKZNDHcQtFOiyRuMMrDIIp9FAHifjf4T3WlzS6r4MjMiMd0tj6n2rzyLUUNybS9ja0vF4eGQYx+NfV9cz4o+H/h7xbB5eq2QDZz5sPyP+YrGdKMtTxsdlFDFvmXuy7/AOZ4F16EH6HNGD6V22p/Ae/t5c+F9cFtEOiTgsSPrWNcfCnxvaKCt1FdEnGFWud0J9D52pw/iov3WmjCwfSg4H3mVfqcVvwfCTxtdKC1/Dak9mXpW7pPwEMxz4r1dr0HqIMpTVCXUdPh/EyfvtJHm63sl3d/YtGtnv7w8COMdPxr1bwJ8JvJmi1jxaBPdjDR25+7H9R3rvfDvg/RfC1mlvpNmibf+WjAFz+NbldEKUYH0mByqhhPeWsu7/QRVCqFUAADAA7UtFFanrBRRRQAUUUUAFFFFAHl/wAQ/wDkrXgL/r5evUK8w8Xr/a/xi8KxWh3nTJGknxztBr0+gAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA8v8Wf8l/8If8AXpN/WvUK8v8AFn/Jf/CH/XpN/WvUKACiiigAooooAKKKKACggEYIyDRRQBDDZ21uzNb28UTN94ogBP1qaiigAooooAK5Tx74mbRNIFrYgyajenyYEXqueN30FdFqWoQaXp097duEihQsxPtXCeDdPn8T+IZvF+roTGcpp8bDgR+uPWgDovBfhpfDeiKkxEl9cfvLqX++9dEeBk0EhQSxAA6k1wWteIL7xRqT6B4TcrEp23d8B8qDuo96AJfEPii91LUDoPhIeZcn5Z7ocrD61t+GPC1r4dtDg+deS/NPcNyXb29qn8P+HbLw5p4t7JMseZJW5Zz6k1rU
AFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHF+JPClxb3/wDb/hY+TqEYzLAvCzj0x61qeF/Flt4htzGw+z30XE1u/BU10Fcl4o8Ivd3C6voEgtNVh5DLwJPYigDra821mCTwD4yj1uzQnSdQYR3US9Im7ua6Lwr4tXWN1hqUf2TVYOJIH43Y/iFberaXba1pc9hfJvhnXawoAsQTx3Nuk8Dh45FDKw6EGpK8+8DapdaJq8/hDW3zLB89pKejR9l+teg0AFFFFABRRRQAUUUUAFFUNR1zTdJtzPf3kUSDrlhn8q5m5+Lvgq0UGfWEAJxwjH+lAHXpZ20U5mjt4klbq6oAT+NTVxFt8YfBN2xEOsqSOTmNh/Sui0rxLpGtw+bpt9FKv+9g/kaANWigEEZByKKACiiigAoopGIVSzHAAyTQBxvxM1ybTPDRs9Ob/iY3zCKBB1bnn9K2fCOhxeH/AAzaWMK7SEDyf75GT+tcbpoPjL4qT30nz6fo2BauPuu5613HiLXbXw7os1/eNhUGFXuzHoBQBj+OPE8mkWkenaWPO1a+OyCJeT7mrXgzwxH4a0cJIfMvJz5lxMfvMx5xn2rI8D6DdXV3L4o8QJm/u+Yo3/5ZJ2x6ZFdzQAUUUUAFFFFABRRRQAUUUUAFc/4x8MxeJdGaH/V3UJ8yCZeGVh0GfeugooA5LwN4ml1S3l0vVv3erWB2ToeN3oR61c8c6AniLwpc2h++g86MjruXkVj+ONAube7h8UeH126hZ/61F/5ap3z68V0nh3XrXxHosV9aMCHGHU9VbuCKAMn4d6++ueFoReHbfW37qeI9VI4Ga6uvM+fBXxXdjxYa9l3c/djcdBXpgORkdDQAUUUUAFFFFABRR061m6p4h0rRoPO1G9ihT/eyfyoA0qhjs7WKZporeJJW+86oAT+Ncdc/GDwTakedrKDPTEbH+lLbfGDwTdZ8nWUOPVGH9KAO2orN0zxBpesQCbT72KVT/tYP5VpdelABRRRQAUUUUAFFFBOBk0AUdZ1a30TSbi/u2AjhQtjPLH0Fcd4E0m41bUZ/F+tKWnusizVv+WcR7Y9aqag8nxC8ZDTYGb+xdMfdcOvR5R29xXpEcaQxrHEoRFGFVRgAUAOooooAKKRmVFLOwUDqScVxGu/ENEvDpfha1bVtRztZY/uxe5PegDq9V1iw0Wye71K4SCJBkknn8q4KbX/EfjqY23hiJtO0w5DX8q/6xfb0q1pXw+utTvl1bxvem/ugQY4EOI0HoR3rvYYIreJYreNIo16Ki4A/CgDnvDXgfSvDce+KM3N253PcT/M2fYnpXSUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFcz4ws/FN5Hbx+E7+GyYn97JKm7ArpqKAOY8JeDYvDzS3t3MbvVLkf6RcHufb0rp6KKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDy/xZ/wAl/wDCH/XpN/WvUK8v8Wf8l/8ACH/XpN/WvUKACiiigAooooAKKKKACiiigAooooAKKK5Px74lbRtKFlYAy6lenyoUXqueN34UAYXiGeXx14uTw5YOf7MsmEl7MvTeP4K9Djjt9PslRAsMEK4A6BQKwPCuhW/g/wAOFr2VftDDzbu4bjc3XNc/cXepfEK/a000vaaHG2JbjoZ/YUASalrGoeONQfRvDjNDpqHbd33r6qK7HRdEstB02Oy06IJGg5Y/ec+pPepdM0u00ewjs7CFYokGAAOT7n1q3QAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBzPirwhHrWy+0+T7Jqtv8ANDOvGT6N61B4W8XPe3DaPryfZNXgGGVuBKP7wrra57xT4Tt/EVuskbfZr+A7oLlOCp9/UUAUfH3huXVLCLU9LUrqmnN5sBXgvj+E+1aPg/xJF4m0
GO5U4uI/3dwnQq461leG/FlxFe/2F4oT7NqMfEcjcLOPUGsjWYZPAXjKPW7NCdI1BhHdRr0hP9+gD0qimQTx3Nuk0Dh45FDKw7g0+gAoorD8VeJ7bwvpLXUw8yZvlhhHWRvSgCxr3iHTvDmntd6nOI1H3V6lj6AV5xc+JvF3jN9miw/2JYdDNMMmVfb0p2maJea3errniyTz5m+aC2/hjXsCK6rgKFUBVHRR0FdMKXWRjKp0Rx9n8NtMS4N3qlzdXl033t0hKH8K24PDOh25JXS7Z8/34wa1KK3UUtjJtszZ/DeiXAAbSrVMf3IgKxdQ+HGiXkgnt3ubS4TlDDJtUH3FdZRQ4p7iTaORt9R8ZeCmDyN/bunDgQoPnjHrmvQfDHi7TfFNn5llJtmUfvYG4ZD6Vmg447HqPWub1nw1J9rGr+HJRY6lF83y8I/sRWE6K3iaxqdz1OiuX8F+L4/Elk0N0n2fUrb5biBuv+99DXUVzG4Vy3xC8QPoPhWd7TDXk/7uGPuxPBxXUngZNeZSZ8Z/FhUPzWOg/OG/hkY9qAOm8F6NB4U8GQpMwUlTcTO/UFhk5Nc7pcMvxD8T/wBr3it/Yli5FpGw4lYdSR3qTxJez+M/EH/CL6S5WxgIN/Mnp2UGu90+wt9MsIbO0jWOKJQqhRigCcAKoCjAHAApaKKACiiigAooooAKKKKACiiigAooooACAwIIyD1BrzbVYJvh94p/tiyVv7Fv3xdxAcRsehA7V6TVe/sYNSsJrO7jEkUqlWBGetAHL+PtFTxV4MaSykBkhxcwyJ1O3nANX/A/iEeJPC9tdsNk4GyWPupHFc14avJvB+vv4U1dy1jOSbCV+691JqHRyfBfxPudLl/48taJmtvSMjtQB6ZRRRQAVj+I/E+m+GNPN1qUwXskY5Zz2AFVfGHiuDwxpm/Hm3cvywQjqxPeuL0rw9c316Nb8Vyfar1uYoT9yIfSrhByZMpKJDca74z8ZuRp6f2DYdCZRlpR6g0lh8ONJt5jc301zeXDct5sm5SfpXXZ4CjhR0A6CkrqjTjE53NszIPDWiQZ26Xavn+/EDRP4a0S4xu0u1TH9yICtOitLIm5yV98ONJnm+02E1zZ3K/dMcmEH4UtvrvjPwY3/EwX+3rDoPKGGiX1JrrKXPGDyD1B6Gs5U4spTaNbw54p03xPZefp0wLrxJEeGQ+mK2a8q1Xw7cWd7/bfhaT7LfxcvEPuTDuMetdp4P8AFlv4o00uF8m8gOy5t26o1cs4OLOiMlI6GiiioKCuL8f+Ip7aGHQtGzJqeoHYFXrGh4LV0eu6zb6Do89/dkbYlJVc8uewFcp4C0a4v7ubxbrYJu7zJtkbrDEe1AHR+FvDsHhrRIrKLDS43TS95G9TWzRUVxcw2kDTXMqxRqMlmOBQBLWJ4i8XaT4ZthJqVyA7nEcS8szemBXLX/jnUvEd2+m+BbZpQDtfUWX5Ij9O9aXh34e2mnXP9pa3KdT1R+ZJZeUB9lPSgDFFv4p+IUhN4H0TRScGD/lpMvqD2rt9C8NaZ4ds1t9Mt1TAwZGGXb6mtUAKAAMAdAKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA8v8AFn/Jf/CH/XpN/WvUK8v8Wf8AJf8Awh/16Tf1r1CgAooooAKKKKACiiigAoqOSeGHHnSpHn+8wFR/b7P/AJ+4P+/goAsUVX+32f8Az9wf9/BUscscq7opFdfVWBFAFfVNRg0nTJ766YLFChY5PXHauA8MRjVNRufHXiRxFAqkWKycBIuecetVvGOsweJPEA0t7jy9F05t97MD95x/B71csdMu/HdzFPfQtY+HbYj7NaL8vn46MfagBVXUPiRfb38y08OxN8q9GuSO/wBK7+zs7ewt
I7a0iWKKMYVVFSQwxW8KxQRrHGgwqqMACn0AFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBi+JPDNn4jsvLnHl3CcxTrwyH61ydtqEoWTwh45Tck6+XBdn7so4wM9jXo1eR/H3xhpmh+EHsmkQ6pPxAB9+P39qAOfv8A4wn4Tzt4a1WD+1JImLRSRvwsR+6DU/wg+Mmo+OPHeoWequkVqybrWL0OelfK1xdT3kxluppJpD1eRix/Wul+GmuN4e+IWlX2/bGs4EnPUUAfetxPHa27zzuEjjG5mJ6CvI9Hkl8beIrnX9TU/Y7WUxWkB6ZH8VbPxW11j4c03TLRyDrjiIFTyAcGrul2CaXpNtZRqF8mMK2O59a3oxu7syqSsrFsnJpKKK6jnCiiigAooooAKOlFFAHJ+K7efQruLxXoynz7Zh9phX/lsua9O0bVINa0i3v7ZgyTIGOOxxyK5uSJJ4JIZFDLIpUg+9YHwsvJNL8Qax4TkYmOzPnRbv8AaPNc1aP2jenLodj4219fDnhW6vesmNkajqSeOK4Sye58KeELTRdN/e+INZYyburIG55/CofiD4igu/G0Fu/7220b55oBz5rN0474Ndb4G8OTxPL4g1sb9SvPu7ukUf8ACB6cVzmxr+EvDUHhnRUtkG6d/nnlblmY9ea3aKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDB8X+GoPEuivA3yXEX7yCVfvKw6c14d4/8AibZW/hhLS/k2+LNKlCqcYJANe1+PNb1XQPCl1faFp7X92inbGv8AD718KeJNW1HXPEF1qOtAi8mcmQFdvP0oA9U0r9oPxfqHiezgnuY0s5pkjZAvIBOOtfWF3fxWGkPfXLARxReYxJ68Zr879Ol8nVLWX+5MjfkRX15411yXWPB/h/RrSQrcakqSMVPVV6imlfQHoL4eWfxXrdx4o1YF4dxXT426IufSuvJycmobW2isrKG2t0CRxoAFHrjmpa7ox5VY5JO7uFFFFUSFFFFABRRRQAoODkVyHiOKfwxrFv4n0dSBuCXcS9Ch6sa66orq0j1CxnspgDHcIUbPoamUeZWKTs7nWadqFvqmnQXtm4eGdAyMO4qySACScAdSa80+EWpyxx6n4eujtGmzlLYHq0fPNanxA1+4RYPDui5k1LUPlKr1jjPBauFqx17mXdl/iL40FohY6FpUmZSOkko6fUV6UqrGgVAFVRgAcAVzVhHo/wAPvCscV3cJFHCuZZD96RvXHU1zcmu+JfHkht/DcTaXpZP/ACEJB/rV9AO1IDf8S+PdP0OT7HaKb/U2/wBXaw8lvx7VhW/hDXPGU63njS5eCxJ3x6bESpQ+5710vhvwTpXhtDJDH592/MlxL8zFvbPSujoArWGnWmmWq29hbxwRqMYRQM/X1qzRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQB5f4s/5L/4Q/69Jv616hXl/iz/AJL/AOEP+vSb+teoUAFFFFABRRRQAUUUUAeI/FmxXXPizoGj3txcJZSWjuyQyFMsCfSqJ+Fvh3P+t1H/AMCmrX+IX/JdfDn/AF4yfzNbR6mvzviXG4nD41QpVHFcq0T9T3cvo050ryV9Tj1+Fnh0uAZdR/8AApqyvCfi8eC9B8WaVbTT3Vyt55NnC7l3UEYz+tejJ98fWvKvCvw51Xxb4+8Q63outJps1le7Arx7w2R1x+Fb8L4zE4jEVI1puSUer8yMxpU6cE4q2p6H4B8A3t5bR33ifcI2PmeQeDI3UMa9ajjSKNUjUIijAVRgCvMf+EN+J/bx5bAf9ev/ANaj/hDfih/0Plv/AOAv/wBavvTxT1CivL/+EN+KH/Q+
W/8A4C//AFqP+EN+KH/Q+W//AIC//WoA9Qory/8A4Q34of8AQ+W//gL/APWo/wCEN+KH/Q+W/wD4C/8A1qAPUKK8v/4Q34of9D5b/wDgL/8AWo/4Q34of9D5b/8AgL/9agD1CivL/wDhDfih/wBD5b/+Av8A9aj/AIQ34of9D5b/APgL/wDWoA9Qory//hDfih/0Plv/AOAv/wBaj/hDfih/0Plv/wCAv/1qAPUKK8v/AOEN+KH/AEPlv/4C/wD1qP8AhDfih/0Plv8A+Av/ANagD1CivL/+EN+KH/Q+W/8A4C//AFqP+EN+KH/Q+W//AIC//WoA9Qory/8A4Q34of8AQ+W//gL/APWo/wCEN+KH/Q+W/wD4C/8A1qAPUKK8v/4Q34of9D5b/wDgL/8AWo/4Q34of9D5b/8AgL/9agD1CivL/wDhDfih/wBD5b/+Av8A9aj/AIQ34of9D5b/APgL/wDWoA9Qory//hDfih/0Plv/AOAv/wBaj/hDfih/0Plv/wCAv/1qAPUKK8v/AOEN+KH/AEPlv/4C/wD1qP8AhDfih/0Plv8A+Av/ANagD1CvlL4t694W1fx1dWGt6dNBeR/J9tL/ACqPpXrv/CG/FD/ofLf/AMBf/rV8s/Ey11Ky8c3kOtXy312p+eZRgNQAt94LtZWUeGdVi1Q/xAfKVrnLrT7vTLwxXETJJGQTjnH41Fbm5D4tDLuP/PLOf0r0nwl8OfHvia0RLeyaOxlPzzzryB+NAHo3gzxMPH3iHTI3bzI9Jt12j0YDrXr7HLE+teR/DPwYPAfxT1TRZJ/OkFkshb6163XZR+E5qnxBRRRWpmFFFFABRRRQAUUUUAKOCK848aeIIvAfjmx1pztjvVaOUj+LjivRq8u+MXhx/FmveFdGhuFtpLiVgsjDODWdX4GaQ+I0/hP4Su9fv5fFPiINJG0zPaK4xuBPGQete3AADAGAK8qtvAvxKs7WK2tvHNtHDEoRFFr0A/Cpf+EN+KH/AEPlv/4C/wD1q4jpPUKK8v8A+EN+KH/Q+W//AIC//Wo/4Q34of8AQ+W//gL/APWoA9Qory//AIQ34of9D5b/APgL/wDWo/4Q34of9D5b/wDgL/8AWoA9Qory/wD4Q34of9D5b/8AgL/9aj/hDfih/wBD5b/+Av8A9agD1CivL/8AhDfih/0Plv8A+Av/ANaj/hDfih/0Plv/AOAv/wBagD1CivL/APhDfih/0Plv/wCAv/1qP+EN+KH/AEPlv/4C/wD1qAPUKK8v/wCEN+KH/Q+W/wD4C/8A1qP+EN+KH/Q+W/8A4C//AFqAPUKK8v8A+EN+KH/Q+W//AIC//Wo/4Q34of8AQ+W//gL/APWoA9QpkzmOCRwMlVJx+FeZf8Ib8UP+h8t//AX/AOtUc/g74ni3kLePLcgKcj7Njt9KAMP/AIaR0y08S3Wka1prwRwuUM+cg/hVfxH4f+GnxdjaXQ9Tt7PVWHyyDCDPoRXzZ4shuoPFN9Ffzi4uFlIeUDG4+tZ9mLwyj7B5/mE8eTnP6UAdL40+HOu+CNQMd/D5sBP7q4i+ZWHrx0r2L4VX7eIbjQri5bzJNOgaMe1YPgbwb8UNatorafdHpU42tLeDeQp9M12Pwz8NJ4S8Ya3o5k8w27ja3rnrWlNe+iJ/CepHqaSiiu05QooooAKKKKACiiigApQcHIpKKAOC1HWIfB/xgivHYJazWDMyZx5kmOKb4f8AE0smo3WpabZvq+v3zHYmOLRD2zWd8RPCw8WfE7RLOe4Mdt5YMka8Fh9a9s0Lw5pfhyxS10q1SJEGAxGWP1NcVT42dUPhRyukfD+e/v11fxpdnULvgxwg4jjHoR3ru4oo4IxHDGsaL0VBgD8KfRWZYUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUU
UAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHl3iohv2gPCIHVbSbP616jXDePfDt7NqVh4m0OLztS0wFVi/voetWvDXxF0rxBfrpeJYNTC5kgeMgKR15oA6+iiigAooooAKKKKAPD/ixqltofxh8PalqQlW0SzkVpEjLAHJ9KrH4o+Fc/wDH1cf+A7V7lcWVrd4+1W0M+OnmRhsfnUP9i6X/ANA2z/78L/hXhZhkWGzCt7aq2na2lv8AI7KGMqUI8sUjxRPij4UDgm6uP/Adq1/gNcJfXPiu+t0kFvcX4aJpEK7hg8816p/Yul/9A2z/AO/C/wCFWILW3tUK20EcKnqI0Cj9KvLslw+XTlOk221bUVfFzrpKSJaKKK9o5AooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArwXXv2e5vFfju51jUdQENpI2fKAyWFe9UUAcT4W+EnhLwntfT9NR5h1klG7J/Gu0jijiQJEioo6KowKdRQB5DrqHTfjebyYbI7y2WJG/vHHSuvYYYisb4v6ZIbLTNdhU/8SqfzZCvUrxWlZXiajp1veRkFZ4w/HvXXReljnqLW5NRRRWxkFFFFABRRRQAUUUUAA61xmqIdT+MHhuOIbv7PcvIB/DmuzLCNGkYgKiliT7Vzfwytn1jxjrfidl3W02IYCexB5xWVZ2jY1prU9UooorjOgKKKKACiiigAooooAKKKKACiiigAooooAKKKKACo50MltIi9WQgflUlFAHz9H+zb/a3iu61XXdQ/0eWUsIFHJH1r1Xwz8M/C3hOMDStMiDY5eUBj9ea6yigBFVUUKihVHQAYAryK6jOnfGiWOX5DfKXj/wBoCvXq8w+LVk+n3uleKYFJeycQttHQMaqDtJMmSujoD1NJTYpUuLeOaJgySIGBH0p1d5yBRRRQAUUUUAFFFFABRRTZriOzt5LqYgRwqXbPoKAOSj/0/wCPenpGN8MFizO3YMO1ev15h8JrGW8vdY8QXS7lubgi0c/88+elen1wTd5NnXFWVgoooqSgooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACoEsrWOczR20KSn+NYwG/Op6KACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAgvrKHULGW0ukDxSrtZT3rybww0/hjWbvwxq7n/WGW1kboVPRRXsFc34y8JQ+JtNHlnyb+3O+2nHVW9/arhLldyZR5lYqEYODSVzeieI5kuv7F8SxfY9Th+UFvuyj+9n3rpSCPp612ppq6OVprcSiiimIKKKKACilAJ6CsHXfE8emSLY6dH9t1Of5YoE5wfUntSbS1Y0rlPxjfz3fk+G9HYm/vmALL/wAsl75r0Xw7osHh/QrbT7ZQojUb8d2xyfzrD8EeDm0VJNT1dhcaxd8yyH+AdlH0rsK4pz5mdMY8qCiiioLCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACqer6bDq+k3FjcqGSZCvI6HHBq5RQB5F4Tnn0O+n8K6uxWS2Y/ZXf/lqufWut6VL438IjxFZLc2LeRqdr80Eo6nHO38a5nQvExurg6VrkX2HVYflaN+A2O4NddKd1ZnPOFndHQ0UpBHWkrYyCiiigAoopQCelACdelcl4uurnV7238LaKd9zcsDcleixdwTV7W/E32W4XS9FT7Zqs/CKnIj9ya6XwT4QXw9ayXV63n6ndnfPMeSD6D2rGpUsrI1hC+rNzRtKttE0e206yXbDboEUVeoorkOgK
KKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAMLxP4S03xRZ+Xex7Zk5imThlPbmvPp7fxh4IbbdRnXdNHWccNCv07169SMqupVgGB6gjOaqMnHYTSe55VpnxB8P6o/lJNLDMOCssZUD8TXQrd2bqCt7bHPbzRW1q/g3w/rsXl6lpkUi/9M2aI/mhBrnI/gj4AilEkejThwc5/tO7/wDjtbKu+qMnS7Fo3NoPvXtsPrKKw9V8c6Bo4P2q5d26AQrvyfwrUm+CngK4k3zaNOzf9hO6H/tWtrRvAPhnw/8A8gvS0TjH72V5v/Q2NDr9kCpd2cBDf+KfGjCHw/Ztplg3/MQcckfSu68J+B7HwyhnY/atQkH725k5JPt6V0scaRIEiRUUdAowKdWMpuW5oopbBRRRUlBRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFc74q8G6d4otMTr5N0nMVwnDKf610VFAHkE7+LfBLeVqtudZ05Ot6o5QfSrml+PvD+rHZBcSRyDhllQqAfxr1J0WRSrqGU9QRkGsHWfA/hzX02anpcbj/pk7wn80INbRqyW5m6aZlLd2bgFb23OewlFDXVmoJa9thjn/Wiq0PwT8AwSiSLRp1cd/wC07o/+1aJfgl4Bmk8yTRp2b1/tO6/+O1ft/In2XmZOqePvD+kDFxcSSOThREhbJ/CqcEni7xo4i0q2Ojae3/L43Vx9K9A0fwP4c0IY0zS404x+9d5T/wCPk1vIiooVFCqOgAwKiVWT2KVNI53wr4L07wvbEwr513JzNcPyzH29K6OiisTQKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAP/Z) ###Code from typing import Sequence class Initializer: def init_weights(self, n_in, n_out) -> Sequence[Sequence[Var]]: raise NotImplementedError def init_bias(self, n_out) -> Sequence[Var]: raise NotImplementedError class NormalInitializer(Initializer): def __init__(self, mean=0, std=0.1): self.mean = mean self.std = std def init_weights(self, n_in, n_out): return 
[[Var(random.gauss(self.mean, self.std)) for _ in range(n_out)] for _ in range(n_in)] def init_bias(self, n_out): return [Var(0.0) for _ in range(n_out)] ###Output _____no_output_____ ###Markdown Exercise e) Dense layerComplete the DenseLayer class below. The dense layer takes an input vector and computes an output vector corresponding to the value of each artificial neuron in the dense layer. ###Code class DenseLayer: def __init__(self, n_in: int, n_out: int, act_fn, initializer: Initializer = NormalInitializer()): """ n_in: the number of inputs to the layer n_out: the number of output neurons in the layer act_fn: the non-linear activation function for each neuron initializer: The initializer to use to initialize the weights and biases """ self.weights = initializer.init_weights(n_in, n_out) self.bias = initializer.init_bias(n_out) self.act_fn = act_fn def __repr__(self): return 'Weights: ' + repr(self.weights) + ' Biases: ' + repr(self.bias) def parameters(self) -> Sequence[Var]: """Returns all the vars of the layer (weights + biases) as a single flat list""" flat_weights = [weight for sublist in self.weights for weight in sublist] return flat_weights + self.bias def forward(self, inputs: Sequence[Var]) -> Sequence[Var]: """ inputs: A n_in length vector of Var's corresponding to the previous layer outputs or the data if it's the first layer. Computes the forward pass of the dense layer: For each output neuron, j, it computes: act_fn(weights[i][j]*inputs[i] + bias[j]) Returns a vector of Vars that is n_out long. """ assert len(self.weights) == len(inputs), "weights and inputs must match in first dimension" output: Sequence = [] for j in range(len(self.bias)): _sum = self.bias[j] for i in range(len(inputs)): _sum += self.weights[i][j] * inputs[i] output.append(self.act_fn(_sum)) return output ###Output _____no_output_____ ###Markdown Verify that your class is correct by running the code below, and verifying that `actual` is the same as `expected`. 
Here we define a small 3x2 dense layer with some fixed parameters and use numpy to compute the expected values. ###Code import numpy as np np.random.seed(0) w = np.random.randn(3, 2) b = np.random.randn(2) x = np.random.randn(3) expected = np.tanh(x@w+b) class FixedInit(Initializer): """ An initializer used for debugging that will return the w and b variables defined above regardless of the input and output size. """ def init_weights(self, n_in, n_out): return [list(map(Var, r.tolist())) for r in w] def init_bias(self, n_out): return list(map(Var, b.tolist())) layer = DenseLayer(3, 2, lambda x: x.tanh(), FixedInit()) var_x = list(map(Var, x.tolist())) actual = layer.forward(var_x) print(actual) print(expected) ###Output [Var(v=0.8935, grad=0.0000), Var(v=0.5275, grad=0.0000)] [0.89347265 0.52750061] ###Markdown Exercise f) MLPWe'll now combine multiple DenseLayers into a neural network. We'll define a class to help us with this. We name it Multi-Layer Perceptron (MLP), since in the "old days", a single dense layer neural network was called a perceptron. It takes a list of DenseLayer as input and defines a forward function. The forward function takes a vector of inputs, the data inputs, and return a vector of outputs, the output of the neural network, after being passed through each layer of the network. It also has a parameters function which just returns all the parameters of the layers as a single flat list.Complete the MLP class below. 
###Code
class MLP:
    """A multi-layer perceptron: a sequential stack of DenseLayers."""

    def __init__(self, layers: Sequence[DenseLayer]):
        self.layers = layers

    def parameters(self) -> Sequence[Var]:
        """ Returns all the parameters of the layers as a flat list"""
        output = []
        for layer in self.layers:
            output += layer.parameters()
        return output

    def forward(self, x: Sequence[Var]) -> Sequence[Var]:
        """ Computes the forward pass of the MLP: x = layer(x) for each layer in layers """
        for layer in self.layers:
            x = layer.forward(x)
        return x
 ###Output _____no_output_____ ###Markdown Exercise g) SGDNow we need code that will perform the stochastic gradient descent. Complete the class below ###Code
class SGD:
    """Plain stochastic gradient descent over a flat list of Var parameters."""

    def __init__(self, parameters: Sequence[Var], learning_rate: float):
        self.parameters = parameters
        self.learning_rate = learning_rate

    def zero_grad(self):
        """ Set the gradient to zero for all parameters """
        for param in self.parameters:
            param.grad = 0.0

    def step(self):
        """Performs a single step of SGD for each parameter: p = p - learning_rate * grad_p """
        for param in self.parameters:
            # BUG FIX: use self.learning_rate. The original read the *global*
            # `learning_rate`, which only worked because the notebook happened
            # to define a global of the same name.
            param.v += -self.learning_rate * param.grad
 ###Output _____no_output_____ ###Markdown Loss functionsWe are only missing a loss function now. We're doing regression so we'll use the L2 loss function $L2(t, y) = (t-y)^2$, where $t$ is the expected output (the target) and $y$ is the output of the neural network. ###Code
def squared_loss(t: Var, y: Var) -> Var:
    """Squared-error (L2) loss between target t and prediction y."""
    return (t-y)**2
 ###Output _____no_output_____ ###Markdown Backward passNow the magic happens! We get the calculation of the gradients for free. Let's see how it works.
###Code mlp = MLP([ DenseLayer(1, 5, lambda x: x.tanh()), DenseLayer(5, 1, lambda x: x) ]) x, t = sample_data() x = Var(x) t = Var(t) y = mlp.forward([x]) loss = squared_loss(t, y[0]) loss.backward() ###Output _____no_output_____ ###Markdown and the gradients will be calculated: ###Code for i,layer in enumerate(mlp.layers): print("layer", i, layer) ###Output layer 0 Weights: [[Var(v=0.2013, grad=0.9330), Var(v=-0.0170, grad=1.2256), Var(v=0.0649, grad=-0.2560), Var(v=-0.0160, grad=-0.0814), Var(v=0.1272, grad=-1.7129)]] Biases: [Var(v=0.0000, grad=-0.5109), Var(v=0.0000, grad=-0.6711), Var(v=0.0000, grad=0.1402), Var(v=0.0000, grad=0.0446), Var(v=0.0000, grad=0.9380)] layer 1 Weights: [[Var(v=-0.0962, grad=-2.1342)], [Var(v=-0.1108, grad=0.1883)], [Var(v=0.0234, grad=-0.7154)], [Var(v=0.0074, grad=0.1774)], [Var(v=0.1632, grad=-1.3832)]] Biases: [Var(v=0.0000, grad=6.0634)] ###Markdown Exercise h) Putting it all togetherWe are ready to train some neural networks!We'll train the neural network for 100 gradient updates. Each gradient will be calculated on the average loss over a minibatch of samples. Read and understand the code below. Answer the inline comment questions. We'll plot the loss for each batch, which should decrease steadily. ###Code mlp = MLP([ DenseLayer(1, 16, lambda x: x.tanh()), DenseLayer(16, 1, lambda x: x) ]) # What does this line do? # Creates a neural network with 1 input, a hidden layer of 16 neurons, and a single output # It uses the tanh as the activation function for the hidden layer and no function for the output layer learning_rate = 0.01 # Try different learning rates optim = SGD(mlp.parameters(), learning_rate) # What does this line do? Intializes the SGD class to use for backpropagation batch_size = 64 losses = [] for i in tqdm.tqdm(range(100)): loss = Var(0.0) for _ in range(batch_size): # What does this loop do? Runs through batch_size samples of out training data and accumulates the loss. 
x, y_target = random.choice(train_data) # What does this line do? #Samples from the training data x = Var(x) y_target = Var(y_target) y = mlp.forward([x]) loss += squared_loss(y_target, y[0]) loss = loss / Var(batch_size) # What does this line do? #Averages out the loss from the previous loop (mini_batch) losses.append(loss.v) optim.zero_grad() # Why do we need to call zero_grad here? #To reset the gradient from the previous mini_batch because we start a new training session loss.backward() # What does this line do? # Calculates the gradient with respect to each parameter optim.step()# What does this line do? # Updates the parameters based on the gradients plt.plot(losses, '.') plt.ylabel('L2 loss') plt.xlabel('Batches') plt.show() ###Output 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 100/100 [00:01<00:00, 66.89it/s] ###Markdown The plot should look similar to: ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAXgAAAEGCAYAAABvtY4XAAAcW0lEQVR4Ae2de6xlVX3Hv0ihyluRh1XOMGPoNNpYCiZoFWpNRaskptY2bUKtDXb6skGpyQwwMRiTWkraSJ1qmyAKFaUtJZhcxqAVakkp0strALUMLQOSQNXS1hr7hzSn+V3W7/KbfffeZ++zz7n79dnJzV577fX4rc9v7e9Zd+1z1pI4IAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEOETj++OOnZ555Jn8woA/QB+gDFfuApG93SMaLTTFx54AABCAAgeoEJK0Wq2qH7iDw1Z1KSghAAAJGAIGnH0AAAhAYKAEEfqCOpVkQgAAEEHj6AAQgAIGBEkDgB+pYmgUBCEAAgacPQAACEBgogcEL/OqBp6d7bt0/tTMHBCAAgTERGLTAm6hv3713unXXytoZkR9T16atEIDAoAXeRu4m7lt2rky37VpZG8njcghAAAJjITBogfcRvIm7jeQZwY+lW9NOCEDACAxa4K2BJurMwdPZIQCBMRIYvMCP0am0GQIQgIARQODpBxCAAAQGSgCBH6hjaRYEIAABBJ4+AAEIQGCgBBD4gTqWZkEAAhBoS+C3S7ov/H1X0vvKlp9nPXg6KwQgAIF6BNoS+Kjlh0p6StKWGJkNI/D1HEtqCEAAAl0Q+HMl/WNW0LPXCDydFQIQgEA9Al0Q+KslvTcr6Ol6RzJwdTKZ1GsZqSEAAQiMnEDbAn+4pO9IOqlA4NejGcGPvKfSfAhAoDaBtgX+7ZK+uK7iJQEEvrZvyQABCIycQNsCf72kXy/R9fVbCPzIeyrNhwAEahNoU+CPlPQfko5dV/GSAAJf27dkgAAERk6gTYEvkfONtxD4kfdUmg8BCNQmgMDXRkYGCEAAAv0ggMD3w09YCQEIQKA2AQS+NjIyQAACEOgHAQS+H37CSghAAAK1CSDwtZGRAQIQgEA/CCDw/fATVkIAAhCoTQCBr42MDBCAAAT6QQCB74efsBICEIBAbQIIfG1kZIAABCDQDwIIfD/8hJUQgAAEahMYlcCvHn
h6uufW/VM7c0AAAhAYOoHRCLyJ+vbde6dbd62snRH5oXdt2gcBCIxG4G3kbuK+ZefKdNuulbWRPO6HAAQgMGQCoxF4H8GbuNtInhH8kLs1bYMABIzAaATeGmuizhw8HR8CEBgLgVEJ/FicSjshAAEIGAEEnn4AAQhAYKAEEPiBOpZmQQACEGhT4I+TdIOkb0j6uqTXbtyo77kYtuyjs0IAAhCoR6BNgb9G0nuShB8uyQS/8EDg6zmW1BCAAATaEvhjJT0q6ZBCRc/cQODprBCAAATqEWhL4E+XdJekT0u6V9JVko7MaLpd7kgGrk4mk3otIzUEIACBkRNoS+BfLekZSWclUb9S0odzBH49ihH8yHsqzYcABGoTaEvgT5Z0YF29pbMl3RyuNwQR+Nq+JQMEIDByAm0JvAn47ZK2JyW/TNIVG1Q9RCDwI++pNB8CEKhNoE2Bt3n4VUn7JN0k6YVBzzcEEfjaviUDBCAwcgJtCvwGES+LQOBH3lNpPgQgUJsAAl8bGRkgAAEI9IPAaAWelSX70UGxEgIQmJ/AKAXe14Znd6f5Ow45IQCB7hMYpcCzu1P3OyYWQgACzQmMUuB9BM/uTs07ECVAAALdJTBKgTd3MAff3U6JZRCAwGIIjFbgF4OPUiAAAQh0lwAC313fYBkEIACBRgQQ+Eb4yAwBCECguwQQ+O76BssgAAEINCKAwDfCR2YIQAAC3SWAwHfXN1gGAQhAoBEBBL4RPjJDAAIQ6C4BBL67vsEyCEAAAo0IIPCN8JEZAhCAQHcJIPDd9Q2WQQACEGhEAIFvhI/MEIAABLpLAIHvrm+wDAIQgEAjAm0K/AFJD0i6r4oRbNnXyM9khgAERkigiraWbZXa5J4J/IurFoDAj7B30mQIQKARAQS+ET4yQwACEOgugTYF/lFJ90i6W9KOgpG8xa/a32Qy6S5FLIMABCDQQQJtCvxLk6ifKOl+SecUiPxaNFM0Hew9mAQBCHSaQJsCH/X8MkkfiBHZMALf6X6EcRCAQAcJtCXwR0o6Oom4he+Q9JasqMdrBL6DvQeTIACBThNoS+C3pWkZm5p5SNKlUczzwgh8p/sRxkEAAh0k0JbA52l4aRwC38Heg0kQgECnCSDwnXYPxkEAAhCYnwACPz87ckIAAhDoNAEEvtPuwTgIQAAC8xNA4OdnR04IQAACnSaAwE+n09UDT0/33Lp/7dxpb2EcBCAAgRoERi/wJu7bd++dbt21sna2aw4IQAACQyAweoG3kbuJ+5adK9Ntu1bWRvJDcCxtgAAEIDB6gfcRvIm7jeQZwfNQQAACQyEweoE3R5qoMwc/lC5NOyAAASeAwDsJzhCAAAQGRgCBzziU0XwGCJcQgEBvCSDwwXU+H883agIUghCAQG8JIPDBdXyjJsAgCAEI9J5AU4G/UNIxkg6R9Mm0Bd+5pctCznlzM1aT9BE836jpfb+mARCAwHQ6bSrwtp67HW+WdKOkVyaRT9GLO22GwFuPYA6e5wICEBgKgaYCvy9J+JWSfj6F712crD9X0mYJ/FAcSzsgAAEINBX4T0n6oqT9ko5I2/Dd/ZwsLy6EwNNZIQABCNQj0FTgnyfpDEnHJSl/kaRXLU7WnysJga/nWFJDAAIQaCrwr5Nkm2bbcb6kP5G0JV1XOR0qyaZ0VmYlRuDprBCAAATqEWgq8DYHb9+g+Ykk1L8r6SuzxDrcv0jSZxH4ek4jNQQgAIEqBJoK/D1JrD8o6YIU9rig47nBl0n6sqQ3IvBVXEUaCEAAAvUINBV4G61fnF6ynizJ5uQfyJXzjZE3SDpT0hsQ+HpOIzUEIACBKgSaCryJuk2znJ30eyLpXRu1fEPMeZI+nmLLBH5HMnB1MplUaQ9pIAABCEAgEWgq8KbRJ0kywba/EzdIeX7ERyQ9IemApKckfV/SZ/KTPhvLS1b6LAQgAIF6BJoK/C9JekzSNZKulfSopHeWCXXOvbIR/HpyBL6eY0kNAQhAoKnA21
IFcdR+giRfvmBdnGcEEHj6IQQgAIElEGgq8NkXqnVess7Q/YNvM4JfgvcpEgIQGDSBpgJ/haRbJL07/X1B0uUHS/NirhD4QfdDGgcBCCyBQFOBN/X+hfQLVvsVqy84thhVD6Ug8EvwPkVCAAKDJrAIgQ8yvLwgAj/ofkjjIACBJRCYV+D/R9J3c/48fuFKj8AvwfsUCQEIDJrAvAK/cAGfVSACP+h+SOMgAIElEEDgS6Cyu1MJHG5BAAKdJ4DAF7jI92fdumtlun333rWt/AqSEg0BCECgkwQQ+AK37Ll1/9TEfcvOlaltwm3XHBCAAAT6RACBL/CWj+BN3BnBF0AiGgIQ6DSBeQX+FEnXS7pd0iWSDgsvSW8K4YUF23jJyhx8p/suxkEAAjMIzCvwX5L0W5JOl/QxSXdIOj6puW3Bt/CjDYGP7BD7SIMwBCDQBwLzCvx9GQW3/VgfkvRySVV3dMoUUX7ZpsD7dA0vXPvQpbERAhBwAvMKvIn58zOS/LOSHpH0ZCZ+IZdtCjwvXL27cIYABPpEYF6Bf7+kn85R7p+UZNM3Cz/aFHgfwfPCtU9dG1shAIF5Bb5MwN9XdnPee20KvHUT5uB5WCAAgb4RWIbAPz6viJfla1vg++ZY7IUABCCwDIH/ZplQz3sPgaezQgACEKhHYBkCzwi+ng9IDQEIQGApBOYVeF8WOLtksMU/M+8ovSwfI/il+J9CIQCBAROYV+DLtLjKPfuK5V1pg277yuWHZmVC4AfcC2kaBCCwFAJtCfwhko5Kom7LHHxV0mvKRB6BX4r/KRQCEBgwgbYEPmr5EenXr2fFyGwYgR9wL6RpEIDAUgi0KfCHSrIlD74n6fKsoKfrHcnA1clkshQAFAoBCEBgqATaFHjX9OMk3Sbpxz0i78wIfqhdkHZBAALLItAFgTc9/6CkD+QJu8ch8MvqApQLAQgMlUBbAn+CJBu52/GCtK78eek699QlgWfZgqE+DrQLAsMi0JbAv0qSrRu/T9KDaQSfK+we2RWB94XHWDp4WA8CrYHAEAm0JfCu25XPXRF4lg4e4mNAmyAwTAIIfE2/+gjelw6+7s7H1jbktngOCEAAAl0igMDP4Q2fgzdxtw25ma6ZAyJZIACBpRNA4BsgZrqmATyyQgACSyeAwDdAnJ2uYZqmAUyyQgACCyeAwDdE6tM1iHtDkGSHAAQWTgCBXzhSCoQABCDQDQIIfDf8gBUQgAAEFk4AgV84UgqEAAQg0A0CCHw3/IAVEIAABBZOAIFfOFIKhAAEINANAgh8N/yAFRCAAAQWTgCBXzhSCoQABCDQDQII/AL9wHfiFwiToiAAgcYEEPjGCJ8twMSddWkWBJNiIACBhRBA4BeCcbq2oqQtOrZl58rUVpq0dWo4IAABCLRJAIFfEH0fwfsywixdsCCwFAMBCMxNAIGfG93GjNk5+Oz1xhzEQAACEFgeAQR+SWx9RM9a8UsCTLEQgMBMAm0J/CmSbpP0NUkPSbpw1t59XdmybybRlIC14quSIh0EILAsAm0J/EsknZFE/WhJD0t6RZnI903gfQTPnPyyui7lQgACswi0JfBZLf+8pDdlI+N13wTewDMHP6v7cR8CEFgmgS4I/KmSHpd0TBT0FN6RDFydTCbL5EDZEIAABAZHoG2BP0rS3ZLekSPuB0X1cQQ/uN5CgyAAgV4RaFPgD5N0i6SLDlLyggsEvlf9CmMhAIEOEGhL4A+RdK2kjxbo+YZoBL4DvQUTIACBXhFoS+BfL2kqaZ+k+9LfWzeoeohA4HvVrzAWAhDoAIG2BD5Id7UgAt+B3oIJEIBArwgg8L1yF8ZCAAIQqE4Aga/OipQQgAAEekUAge+VuzAWAhCAQHUCCHx1VqSEAAQg0CsCCHyv3IWxEIAABKoTQOCrsyIlBCAAgV4RQOB75S6MhQAEIFCdAAJfnRUpIQABCPSKAALfK3dhLAQgAIHqBBD46qwapYxrw8dwo0LJDAEIQKCEAA
JfAmdRt3x3J9uf9bRLbp6eduneKXu1Loou5UAAAkUEEPgiMguMj/uznrpzZWp/W3auTG07P7vHAQEIQGAZBBD4ZVDNlOkjeBN0H8GzV2sGEpcQgMDCCSDwC0eaX2Ccd4/h/NTEQgACEGhOAIFvzrBRCYh9I3xkhgAESggg8CVwln3Lp2544bps0pQPgXESQOBb9Ht8+coL1xYdQdUQGCgBBL5Fx/oInheuLTqBqiEwYAJtCfzVkr4l6cFqG/ZJQ92yjzn4AT9dNA0CLRNoS+DPkXQGAt+y96keAhAYNIG2BN4G7qci8IPuWzQOAhBomUDXBX5HMnB1Mpm0jIrqIQABCPSLQNcFfn2Kfqhz8P3qLlgLAQj0iQAC3ydvYSsEIACBGgQQ+Bqwlp2Ub9QsmzDlQ2BcBNoS+M9JelLSDyQ9IemC9bmYgsDQp2j8O/H8qnVcDyCthcAyCbQl8AUyXhw9dIHnV63L7OaUDYFxEkDgO+J3H8Hn/aqVqZuOOAkzINAzAgh8hxyWJ+Qu/EzddMhRmAKBnhBA4DvuKKZuOu4gzINAhwkg8B11jo/mr7vzsen23XvXtvezs8VzQAACEKhCAIGvQmmT02SnZUzkbSSPuG+yI6gOAj0ngMB30IFMy3TQKZgEgR4SQOA76DQfwed9o6aD5mISBCDQUQIIfEcdYyKfNy1TN76jzcMsCEBgEwgg8JsAeVFV+MjevzLpc/P+ItbjLR0HBCAAAQS+R30gzs1v3bkyffnFN09N1P28ZefK2rdtLB0HBCAAAQS+R33AR/A2Nx9F3cU+b86+aEon2+yq6bL5uIYABLpLAIHvrm9yLXMh9mkZF3WfrrH7fljYvjvvUzd5aSxtNl0sw8viDAEI9I8AAt8/n61bbEJc9iL2khv3rYm7Td34KN/F3vJ6/pjOPjDiFI+nsXOVo276KmVm02xGHdk6uYZAHwkg8H30WonNJn4+aj/tkpunp1367K9g45SOifjFN+7LTWd5faTv/yX4h4LHF4l9rDv7q9soylXCRU2sWkdR/qL4aFNMUxQf02TDdfMUpS+Kz9Y363pR5cyqJ+9+Ud1F8XlldDmu6+1A4Lvce+awLb6IdSG3OBdrn9LJjtpN8GO67Mvb7H8AUey9k1cp0z90rPyicPbDwTAU1eH/bdh9/2DLy+8ovRw7e9jZZD/IsvGW3g/P63F+nc2Tx8nSzkqfLcfr8fr97OXEMmNaC5dxifm9zOw5ponhbLrsdVHdRfHZ/FWvi2yqEl8lTZEd2XZEX5flsT5reeMR7YjhmGaeMAI/D7UO5/FO50IeO1LsOEXprPOZ0MVpHSsr/gcQxT5PpC19jI95T925MrU/K78obPn9A8fsdFvjh4K3zx+qog8Xz5/98CqyL7Yt2h1tyopvvI55YlmxvhguSh/jY91V2hM/4KI/rRwXlywPy+Msq9aRl97y+pGt231a5ivLa2W4nbPCkX1sQzbe7bKzf+BFP5TlLWpnbF/0tZXl9cX2ZG3ycmN81qZYjnOtc0bg69DqSVrrFP6AlJmcl87irINmBdQ7ocVH8YkiHYUoPsTe+e2+d+AqYR9Rx7JiHW5TVvi9jhhv4SK7o30xTYyPZcY0Zs/5V90580MxcorhWEcsN8bHumM4po9l5jGyOBcxO2d5eH11mWXTex3W/9w/Vne028PZeM/r9nm6bB0xPjKIbYjxkUfsS5FZUd4YH+uNtlr5RfU5gzLeMW+0Kdo9r9Aj8GUKONJ7ecJvKDzeO218QC1snd47op3t2uMtj3/oeDl2zgvHh9A7eSzL8tgRR1CezuJi/vjA+MNqaf1hzdoX2+YPcbbMWI6nifZ5W2NZsb4Y9vxWR0wf4+u2x8s3UYnlOGuLL/sPrQqzIlFyNtm6YxuKfGXx8cMy2lEU9vosb7QpxkceHrb0MVyUN8ZHGyy/+cyZRt95uUWiXsXWaJ+z9H5fR5baFPi3SPoXSY9I2lW8Wd+zd4a+ZV8dp3UhrXdsO8
dwtK0oPqbJC1u+KJhFdeSls/JivD9s9sBkxc7S+UMa7ciLj2Vmy/E6Z5UVy43hOnVXaU9WTM0uP4rakSdQZcyK0kdBtPxed7Zeu7YjG19Ubmx3DEdfxLwxPsvDp4qsbvszG4vyxvhsvd6GyNbKivXFD5poUyw3xkebYjmRpddX5dyWwB8q6V8lbZN0uKT7Jb2iTOQR+CruHE4af/CyD1G2hUXpYnwMZ/PXuV5UOXXq9LSx7hj2+/Fs9000XKDtOh5F+WN8DMe8MRzTeDgrXBbvh6eJcXYvGx+vq4S9/LyyPK6Mh+ePdXmc5zfhtvtFabLpY33GxPNn0+XFexqrK5Zj13WPtgT+tZJuCYJ+sST7KzwQ+LquJf2YCVQRomXxabPuojZttk2Lqq9pOW0J/DslXRXU/Fcl7QnXHtyRDFydTCZFviMeAhCAAARyCHRd4F3oxQg+x3tEQQACECgh0JbAM0VT4hRuQQACEFgEgbYE/ock/ZukreEl6yvXh+s5AUbwi3A3ZUAAAmMi0JbAm4S/VdLD6ds0l+Zo+kFRCPyYuiVthQAEFkGgTYE/SMBnXSDwi3A3ZUAAAmMigMCPydu0FQIQGBWB3gi8pG8nY1fnOB+YI8889XQpzxjbbPzH2O4xthlfS1X0xnRz8IeBGNsxxjabj8fY7jG2GV+PTdFK2jvGB2CMbeahL3kIBnhrjH18jG2e2XXHCGWMbUbgZz4Kg0owxj4+xjbP7LS25MHYjjG22Xw8xnaPsc34emyKRnshAAEIQAACEIAABCAAAQhAAAIQgAAEukig1q5RXWxARZtOkXSbpK9JekjShSnfiyR9SdL+dH5hxfL6lMw2j7lX0koy2tY3+mraKeyv0lpHfWpPFVuPk3SDpG9I+rokW7xv6L5+f+rbD0r6nKTnp7WshubrqyV9S5K1048i3x4i6U9TX98n6QzPMIZz7V2jegzlJcG5R6c1fmyHrD8K2yHatoiX97iNRaZfJOmzQeD/WtIvp8R/Lum3izL2OP4aSe9J9tuOaCb4Q/b1SyU9KukFqc3m43dLGqKvz0nPchT4It/ael5fkGRC/5o0sOlxt65neu0liesV3+nUn5f0prTnrYm/HXa2PXCHdLxM0pclvTEJvHX070iy1UrtyPaBFN3r07FJ7Kyt8TDfDtXXJvDfTP+lmG/tv7U3D9jXp2ZG8EW+/QtJvxI6QUwXoocZrLpr1NBab53jcUnHSPqv0DgThHgdbvU2aNMUZ0p6Q3roX5z+XfUG2dRVHAl5fJ/Pp0u6S9Kn09SU7Yx2ZMa3Q/S1TTt+Ly1Zcp2kIfs6K/DxuY2+tQ+614fObIOdV4frQQfHKPBHSbpb0juSZ2PHsKj/HJDHz5P08dSeMQm8PcDPSDortf1KSR/OCPzQfG3vjm6VdIKkwyTdJOn8AX+Ylwl89O2oBT777/nMjb3TA9PXk3V828jc5qT9iP+yDW2K5iOSnkiLiz0l6fuSbGQ39Cmak1Ob3cdnS7p54NNxvyjpk95gSe+S9IkB+zor8EXP8ainaGrvGhU6UN+C9m/btZI+mjH8isxLVntZM8TDR/DWtr/JvGT9nQE2+HZJ21O7LpNkfh6yr+2/Fft22BHphaK9ZP69Afs6K/BFvn1b5iWrTd2N6qi1a1SPydg83FSSfVXqvvRnbT8+vYS0r0n+XXpJ1eNmFpoeBX5bmqN+JAnADxfm6u8Nm4e3NUnM3zZdYVMYQ/f1h9LXQu2dyl9KMr8O0df2FdAnJf0g/Yd6QYlvbWD3Z2lXvAfGNP/e30cXyyEAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCGwWgf9LXye9X9I9kn5qRsW2oFeV79T/PV9Xm0GS2xCAAASWTMDWNfHDFq76il8UnLM/PilIJgS+iAzxEIAABDaJQBR4+ym8/WDIDlvPxxZtslG9/Xjk7Sn+ekn/m0b99stCO3amNPZfwB+mOB
N4W5bZflX4sCRbSsAOW77a8v1z+oHSb6Z4W0LiH1K59iMeT59uc4IABCAAgboEfIrGNs3477QqpZVhS1zYSpx2+IqF9ovB7Aj+5yTdkX4+b2ltUwY7TOD/OIXt18T2C2I7bGPs3Slsv8K0X6La5iS/L+nSFG8fAraePwcEIAABCDQgEEfwtjCdrXViQm6Ltu0Jyz7YqN0W+MoKvIn4b+TUbwL/uhR/Uljx0JY2thG9LyVhG1icK8k2eLBlFWxNGVuCgAMCEIAABBoSiAJvRf27pBPTTkG21Z8JvR0HkrjXEXhfk9v+A7D8dvxt2qQiXR50+pH0YWHibyslckAAAhCAQAMCUeB/LC09a1MktrHEx1K5P5MWcTNxt8W8Hgv12X6/RVM0eQJvUzQ2z+8fHD+aNurYkubnrej35qwIGqokCAEIQAACVQj4HLyNmu0lqS29aoeNuv8pvTz9VNrU2gTeDtv71V6E+ktW2+fWNjq3Mv4gpYnfookj+OelNPbi1sqwTdJt+71fS9e2cbgtA2zz8hwQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAKBwP8Dajpbh65w6sgAAAAASUVORK5CYII=) Let's also plot the data and what the Neural Network has learned. ###Code for _ in range(100): x, y_target = sample_data() y = mlp.forward([Var(x)]) plt.plot(x, y_target, 'b.') plt.plot(x, y[0].v, 'r.') plt.title('True (blue) and MLP approx (red)') plt.show() ###Output _____no_output_____ ###Markdown [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/real-itu/modern-ai-course/blob/master/lecture-06/lab.ipynb) Contents and why we need this labThis lab is about implementing neural networks yourself from scratch. All the modern frameworks for deep learning use automatic differentiation (autodiff) so you don't have to code the backward step yourself. In this version of this lab you will develop your own autodif implementation, and use this to build a simple neural network. Once you've done this lab you should have a very good understanding of what goes on below the hood in the modern framework such as [PyTorch](https://pytorch.org/), [TensorFlow](https://www.tensorflow.org/) or [JAX](https://github.com/google/jax). In particular the code we'll develop will look quite similar to the pytorch API. External sources of information1. Jupyter notebook. You can find more information about Jupyter notebooks [here](https://jupyter.org/). 
# Nanograd autodiff core, adapted from
# https://github.com/rasmusbergpalm/nanograd/blob/main/nanograd.py
from typing import Union
from math import tanh


class Var:
    """
    A scalar variable that remembers how it was computed, so that the
    gradient of a downstream result with respect to it can be obtained
    by reverse-mode automatic differentiation.
    """

    def __init__(self, val: Union[float, int], parents=None):
        assert type(val) in {float, int}
        # Each parent is a (Var, local_gradient) pair recording the
        # chain-rule factor that links this node to its input.
        self.parents = [] if parents is None else parents
        self.v = val
        self.grad = 0.0

    def backprop(self, bp):
        # Accumulate the incoming gradient, then push it to each parent
        # scaled by that parent's local derivative (chain rule).
        self.grad += bp
        for parent, local_grad in self.parents:
            parent.backprop(local_grad * bp)

    def backward(self):
        """Seed the output gradient with 1.0 and propagate backwards."""
        self.backprop(1.0)

    def __add__(self: 'Var', other: 'Var') -> 'Var':
        # d(a+b)/da = d(a+b)/db = 1
        return Var(self.v + other.v, [(self, 1.0), (other, 1.0)])

    def __mul__(self: 'Var', other: 'Var') -> 'Var':
        # d(a*b)/da = b, d(a*b)/db = a
        return Var(self.v * other.v, [(self, other.v), (other, self.v)])

    def __pow__(self, power: Union[float, int]) -> 'Var':
        assert type(power) in {float, int}, "power must be float or int"
        # d(a**p)/da = p * a**(p-1)
        return Var(self.v ** power, [(self, power * self.v ** (power - 1))])

    def __neg__(self: 'Var') -> 'Var':
        return Var(-1.0) * self

    def __sub__(self: 'Var', other: 'Var') -> 'Var':
        return self + (-other)

    def __truediv__(self: 'Var', other: 'Var') -> 'Var':
        return self * other ** -1

    def tanh(self) -> 'Var':
        t = tanh(self.v)  # evaluate once; d tanh(x)/dx = 1 - tanh(x)**2
        return Var(t, [(self, 1 - t ** 2)])

    def relu(self) -> 'Var':
        positive = self.v > 0.0
        return Var(self.v if positive else 0.0,
                   [(self, 1.0 if positive else 0.0)])

    def __repr__(self):
        return f"Var(v={self.v:.4f}, grad={self.grad:.4f})"


# --- Worked examples from the notebook cells ---
a = Var(3.0)
b = Var(5.0)
f = a * b
f.backward()
for v in [a, b, f]:
    print(v)

a = Var(3.0)
b = Var(5.0)
c = a * b
d = Var(9.0)
e = a * d
f = c + e
f.backward()
for v in [a, b, c, d, e, f]:
    print(v)

# Exercise c): calling backward() a second time does NOT reset the
# gradients — it accumulates on top of the previous values, doubling them.
f.backward()
for v in [a, b, c, d, e, f]:
    print(v)
def finite_difference(fn, x_val, dx=1e-10):
    """Numerically approximate d fn / d x at x_val.

    Uses the forward difference (fn(x_val + dx) - fn(x_val)) / dx.
    fn must be a *callable* of a single numeric argument.
    """
    return (fn(x_val + dx) - fn(x_val)) / dx


# Test function - try to change into other functions as well.
def f(a, b):
    return a * b + b


# Nanograd gradients of f at a=3, b=5 (expect df/da = 5, df/db = 4).
a = Var(3.0)
b = Var(5.0)
f_var = a * b + b  # kept distinct from the function f to avoid shadowing it
for v in [a, b, f_var]:
    print(v)
f_var.backward()
for v in [a, b, f_var]:
    print(v)

# Finite-difference check of the same gradients: fix one argument and
# differentiate with respect to the other.
# NOTE(fix): the original cell called finite_difference(fn(3, 5), 3),
# which passes the *value* fn(3, 5) instead of a callable and raises a
# TypeError; it also never computed the requested gradients w.r.t. a and b.
print(finite_difference(lambda a_val: f(a_val, 5.0), 3.0))  # ~ df/da = 5
print(finite_difference(lambda b_val: f(3.0, b_val), 5.0))  # ~ df/db = 4

from math import sin
import random
import tqdm as tqdm
import matplotlib.pyplot as plt


def sample_data(noise=0.3):
    """Draw one (x, y) pair from y = sin(x) + x + Gaussian noise, x ~ U(-5, 5)."""
    x = (random.random() - 0.5) * 10
    return x, sin(x) + x + random.gauss(0, noise)


train_data = [sample_data() for _ in range(100)]
val_data = [sample_data() for _ in range(100)]

for x, y in train_data:
    plt.plot(x, y, 'b.')
plt.show()
It's dense because the weight matrix is dense; there's a connection between every input and every output neuron in the layer.The inputs to create a dense layer are the following:1. **The input size and output size**. We have to define the number of inputs and outputs. The inputs are the number of inputs to the layer, and the output size is the number of artificial neurons the layer has.2. **Activation functions**. Each dense layer must have an activation function (it can also be the linear activation, which is equivalent to the identity function). The power of neural networks comes from non-linear activation functions.3. **Parameter initialization**. We will initialize the weights to have random values. This is done in practice by drawing pseudo random numbers from a Gaussian or uniform distribution. It turns out that for deeper models we have to be careful about how we scale the random numbers. This will be the topic of a later exercise. For now we will just use simple Gaussians. See the `Initializer` class below.Note that we use Sequence in the code below. A Sequence is an ordered list. This means the order we insert and access items are the same. 
![f2.jpeg](data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEAYABgAAD/4RDaRXhpZgAATU0AKgAAAAgABAE7AAIAAAAFAAAISodpAAQAAAABAAAIUJydAAEAAAAKAAAQyOocAAcAAAgMAAAAPgAAAAAc6gAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAERUUDMAAAAFkAMAAgAAABQAABCekAQAAgAAABQAABCykpEAAgAAAAMxMQAAkpIAAgAAAAMxMQAA6hwABwAACAwAAAiSAAAAABzqAAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMjAxMjowODoyNCAxNDozODo0MAAyMDEyOjA4OjI0IDE0OjM4OjQwAAAARABUAFAAMwAAAP/hCxdodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvADw/eHBhY2tldCBiZWdpbj0n77u/JyBpZD0nVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkJz8+DQo8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIj48cmRmOlJERiB4bWxucz
pyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPjxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSJ1dWlkOmZhZjViZGQ1LWJhM2QtMTFkYS1hZDMxLWQzM2Q3NTE4MmYxYiIgeG1sbnM6ZGM9Imh0dHA6Ly9wdXJsLm9yZy9kYy9lbGVtZW50cy8xLjEvIi8+PHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9InV1aWQ6ZmFmNWJkZDUtYmEzZC0xMWRhLWFkMzEtZDMzZDc1MTgyZjFiIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iPjx4bXA6Q3JlYXRlRGF0ZT4yMDEyLTA4LTI0VDE0OjM4OjQwLjExNDwveG1wOkNyZWF0ZURhdGU+PC9yZGY6RGVzY3JpcHRpb24+PHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9InV1aWQ6ZmFmNWJkZDUtYmEzZC0xMWRhLWFkMzEtZDMzZDc1MTgyZjFiIiB4bWxuczpkYz0iaHR0cDovL3B1cmwub3JnL2RjL2VsZW1lbnRzLzEuMS8iPjxkYzpjcmVhdG9yPjxyZGY6U2VxIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+PHJkZjpsaT5EVFAzPC9yZGY6bGk+PC9yZGY6U2VxPg0KCQkJPC9kYzpjcmVhdG9yPjwvcmRmOkRlc2NyaXB0aW9uPjwvcmRmOlJERj48L3g6eG1wbWV0YT4NCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC
AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgPD94cGFja2V0IGVuZD0ndyc/Pv/bAEMABwUFBgUEBwYFBggHBwgKEQsKCQkKFQ8QDBEYFRoZGBUYFxseJyEbHSUdFxgiLiIlKCkrLCsaIC8zLyoyJyorKv/bAEMBBwgICgkKFAsLFCocGBwqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKv/AABEIAmgEnwMBIgACEQEDEQH/xAAfAAABBQEBAQEBAQAAAAAAAAAAAQIDBAUGBwgJCgv/xAC1EAACAQMDAgQDBQUEBAAAAX0BAgMABBEFEiExQQYTUWEHInEUMoGRoQgjQrHBFVLR8CQzYnKCCQoWFxgZGiUmJygpKjQ1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3
h5eoOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4eLj5OXm5+jp6vHy8/T19vf4+fr/xAAfAQADAQEBAQEBAQEBAAAAAAAAAQIDBAUGBwgJCgv/xAC1EQACAQIEBAMEBwUEBAABAncAAQIDEQQFITEGEkFRB2FxEyIygQgUQpGhscEJIzNS8BVictEKFiQ04SXxFxgZGiYnKCkqNTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqCg4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2dri4+Tl5ufo6ery8/T19vf4+fr/2gAMAwEAAhEDEQA/APpGiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAopsis0bKjbWI4PpXiGoeKfF8fxlh8JQayBbyjfv2dB6UAe40VHbpJHbok0nmSAYZ/U1JQAUUUUAFFFFABRRVe/voNOspLq6kCRoMkk4oAsUVBY3aX9lFdQ52SruXPpU9ABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUVxnxO1DVtD8GX+r6Re+RJbJvClc5oA7OivL/gxr3iDxf4ZTW9a1ISqzFfJC46Vp/FbUNc0Dwlea3o2pfZzbKD5RXO6gDvaMgnAPIrhfhZqWs+IPBdtq+sX/nyXKnChcbTVnwl4e8QaT4j1e71vVTe2ty2beP8A55igDsaCQBkkAeporxX44a94i8PajpI03UzHZ31wqPCB7+tAHtVFQWTFtPt2Y5JiUn8hXL/EyXV7TwVe3uhah9int4y27bnPFAHXgg9DmivO/ghrOoa58N7e81acz3BkYM57816JQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFcn4+8eWfgjS0lkUT3k7bILcHlzQB1lFcFoum+MdVsRqF7rRsjcrvS2EefKB6Vk6n4s1z4a6nanxZdf2lpF5II/tm3BhP0oA9ToqG0uob60iubVxJDKodGHcGpqACiis2DXbO48QT6RE4a4gjEjgHOAaANKiiigAooooAKKKoWms2l7qdxZWziSS3GXK8ge1AF+iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK8C1D/AJOos/8Arka99rwLUP8Ak6mzHfyjQB3nxe8Saz4W8NxahozhAJQkjEZ61jand+PtV8FprOlXq2LwQGUpjPnADNXvjz/yTc+n2qP+ddBZZ/4VT/3Dm/8AQTQBieEvG+qeI/hVLqqRqdSt4mEnPVhnn9K5HwP4v8e+OtE1BLGZY5IpSouyPuH+7im/Ax3b4X+JdzEhXlA9uGrT/Zz/AORa1bHT7Yf60AQfD/4keI
rbx1ceDfHoxeEEwXGMbvStMeJvEWlfHC38N3mofarC6hMqrtxt9qoeNNOTUfj7oJ06MPcwwlpyvYA96NeYD9p3SCxCj7HjJ4oA1fi34i1/wlPpeoaVqG23uLpYZLYr1BrJ+PFvrE/hCwvrTVmtrdpYw9uo+8T3zUv7QdxD/ZehxeYu/wDtBDjPTmrXxsdT8L9PYMpX7RDznjpQB1Xw/wBL1ax0G0fUtXN7G8IKJtxtrr6x/C0iHwxpyh1LGBTgGtigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK4j4w8/CrWf+uNdvXEfGH/AJJTrX/XGgDzv4D+ONF0P4cxWd886zLIxOyIsPzrX+LfxA0LVPhnqdpayXBlkUBQ0JAzn1qb9nO2gl+FcLSQRMfNb5mQEmtv402tvH8J9XZLeJSEHIQcc0AN+ELzR/BrT3thmURMVHvVL4UeK9e8QeI/EVrr04dLOQCFAPuDNaPwa/5JBpv/AFyaub+EAJ8ZeMcdfMxQB0eqeJ9T1/xo/hrwzcfZvsnN1dgZ2+1eXfG/TNb07V/D7arqp1CF7ldilcbDmt74Ua7DY/FzxPo+onyrq4kDRFzgtij9oxlF54ZBYA/ahxn3oA7zxl41/wCEW0LTrWyTztTvkSO3j/AAn8K5n4h6J4th+Hd7dvr5nAgLSW5TG4EdM+1Y3xSvhpHxG8D6pcgNZpGqliflBNek/EeeOX4ZalMki+W9uWDA8HIoA5D4H6lBo/wUjvrxgkcTOST3PpWt5/iDxRpR1jTfEUelLIrFLXg4x0/OuB8MW897+y3cRWKmWZZGfYp5IBrqPhMfCvinwTbMSBeQDZcRNLgqw9qALHw5+ImqeJdF1uw1CPGraUGxJjAlx3rK+HvjHxl41tNWtBKqSRylEusf6nnpivQbOw8PWa6nHoccYuY4H85oznt3Neffs8Zx4i/6+z/M0AXPAnjPxDa/Ee98HeKboX0y5aKcDGBXr9eB2mf+GsZ/+uTfyr3ygAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACvnTx5cf2z+03odhcgmC22kITwT64r6Lr59+LOnT+GfjJofjEW5NhuVJ5B2PvQB9BAADAGAK86+OdjBefCvUmuEDGFd6H0Nd9ZXkN/ZQ3Vs4eKZA6sDnINeb/HjWktPAMulQqJrzUm8mKJTlvrigDk/AWta837PL6tZ6mYZ9PDbcjO5VIGP1r0fwPquqeKfhzDdz3flX06cTgfdPrXH6X4Xn8Jfs231heDbO9qZXHoSQa6X4Pyxp8MLF2kQKF5JPAoAx/ht4j8Q+Jo9e03UNSzcWrtFDchfukHGa4b4V6X4hv/H/AIhjTxC8dzA+JJiuTIM9K6r4JOja/wCJcOpzdORz15rP+DMiJ8TfFhd1UCQ/eOO9AF/xn448X+H/AIhaTpVqBJbuQrJj/XmqnjXXfiR4OlHiCWcTaTvBltgv+rUnpVzx8Qfjh4WOQV8wYPbtXc/FJoh8M9Z8/btMBGG7nNAHKeM/G2pXPwuh8Y+FNS8hFVTJFtzuJ6iuttL3Utb+G1tqFvefZb2W180y4zzjNeSvp9zpv7Kk0V1EY2Zg6r/sk8V6n4Xnhi+Edk7yoFGn9d3+zQBz3w18Qa/428A6j9q1Hyb+Kd4UugvTHfFcj8F9J8QXOqavIPELjyrlllBXJk5roP2fpEbwbq211JN7KcZ571H8DJI1vPERaRRi7fOT05oA9kQFUUMckDk+tLQCCMg5BooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAGyqzxMqNsYjAb0ry+7+D1xdeNF8THX5Vv
1Pyts6D0r1KigDivG3gO68Z6LBptxq7wwoQ0mF++w71bg8J30Hg1tCXV3JMflibbyFxjFdVRQB5x4a+Hf/AAr/AMK6xb2+oPcwzwyOyMuPmwea4b4F6Nqt5omrzaZq7WUZuiDGFzzzzXt+u6bNq2kTWVvdG1MylTIBng1y/wAPfh0/gKO4hh1R7mGd97Iy45oA0vDXguHRL6bUr24N9qcxO65cc49BWb47+G0Xi+9ttRs799N1K24S5jGTiu5ooA8w174Nx+I9DtrXVNXmlvIJBIbrHLY9q6LXvAFl4g8Cp4bu5m2RqNs3cMO9dbRQByvgvwa3hXTo4Lq/k1CaIbI5X42r6YrqqKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArmfGvhSfxdos2ljUGtbeddsgVc5FdNRQBwvw/+Hlx4DtRZW+rvcWKnIhZcc1c8deC7rxnpr6d/ar2lnKMSRqud1ddRQBxnhPwRe+FfDh0eHWHlgVSsRK/czVLwd8NLjwj4gu9Sh1mSZbxt08TL96vQKKAPO/GnwksfE2uwa7pl2+latEcm5iH3vTNZ2t/Bu58TrZP4h8Qy3lxaEESFMZxXqtFAHH+JfhzpfinwjDoupEsbdAIZ/wCJCO9c7D8JdVfw4+g6j4pnutMK7ViZeVH1r1KigDkfAfgC18EaA+kxTtdQvnO8dj2rjL/4BW6+ILjUfDmtT6Slw254Y+h9a9hooA5XSvBEOi+G59P066dLq4TbNdtyzH1rK8BfDSXwNc3b2+rvcR3TF3Rlx83rXf0UAebR/CiaP4iN4tGtSfa2OCuzjHpXpCghQCckDk0tFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFUNZ0Ww1/TZLDVbdZ7eQYKsKv0UAcBaeANd0oG30bxRNa2Kn91AUzsHpmrel/DyFdaTWPEV22r6hF/qpJBgJ+FdpRQBU1PTbfVtLn0+8TdBOhR19q8+8M/Ca48PXMkH9vzT6QzEiyIwAD2zXplFAHBeCPhfb+C9cv8AUIL6Sdbp2ZY2HCA9qrSfCO1j8Zza3p9/Jaw3JzcWyDiT8a9GooA8O+Jti918WPDVhaTG2diFjlHJWu5uvAV9rEsMXiDW5L6wRtz2xXAf61U1z4YXGteMLbxA+tyRz2rboVC8LXfW0ckVuqTSmVwOXPegDO1bw5Yav4ak0SaILaPGIwoH3QOlcV4f+E1xpEUtlea/PeaawIitiMCMV6XRQBw3w9+Glt4C+2rb3j3KXTlgrcBM9qo6T8I4NG8XXWqWWpypZ3bF5rMdGP1r0eigBsaLHGqIMKowKdRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFctrXxF8P6BrMOlajcPHdzOEjTZ94n3rqa8J+OEUZ+InhN9g3CUc4/wBqgD3UHKgjvS1UutQtdNsPtF7MsUarkljjt2rAtfiP4dur+O0FzJDJKcIZo9gb6GgDqqKxta8V6T4edF1W48gSDKsRx+dXNO1a01Wx+12Tl4cZDY60AXaKxrDxVpepao+n2kzPcR/fXb9361HrvjDSfDuBqEkjHGSIU3lfrjpQBu0VlaH4m0rxHY/a9Ku0ljH3gTgr9R2qu/jTQI5LlDqMRNqCZSGBAxQBu0Vk+HvE2leKLFrvRbkXEKttYjsa1qACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAr317Dp1lJdXJIijGWKjJxWL4X8c6J4vmuo9DuDMbU4kJ
XGDW3egNYThgCPLbg/SvFf2fY1j1jxQEAA+0n+dAHuNFYes+MNH0OZYbydmlPVIV3kfUDpUmieKdJ8Q2kk+l3IlEX30xhl+ooA2KK5y38eaBdah9hhuy11nb5W35vyrT1bW7HRLQXOoyGOI/xYoA0KKoW2tWN1pQ1FJdtsRkO/FYUHxL8Nzaklk11JBK7bVM0exWPsTQB1lFUdS1nT9IsvteoXUcMPGGZhz9Kx7v4heGrKa0iuNRRXuziIev1oA6aisDVPGujaRciC5lkdyAf3Sb8A+tX9I1yw1yBpdOl8xV4PGMUAaFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXKzfEbw/B4mh0CS4cahM21YimK6qvCPG8Ua/tJaC6oAxReQPpQB7vRVLVNXstGtGuNQnWJB0GeW+g71j6Z8QNA1XUlsILl47h/uJMmzd9M0AdLRWFq/jLRdCvBbapdeRIRkbhwa0rfUre50/7bCWMONwOOSKALdFZGk+J9M1u4mh06YyND9/5cAVQ1j4gaDod00F7PISv3njj3Kv1NAHTUVQstb07UdLGo2l3FJakZ8wMMD61lTeP/DkOlSai+op9mjfYzj1oA6Siqmmanaaxp8d7p0yzQSjKup61boAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArwz43/APJQfCf/AF1H/oVe514N8bLlpviF4c+z2lzMLWUGVo4yQOaAPQfiH4Vh8R2FjNeaqbC2s3WV13YEmOcVwnxhvrLV/B9ldaTZlIrS5QLc7djKe2Pap/jbNq0p8P31nFcS6KjrJdRRqdx57j6VW+KvilPEPw1hPhnTJ5bNJ4zMTCVZMeg70Adrr3hqPxx8J4oL4lrkWvmJIOu4Dis34SeLRL4CubO+jEV1ogaN4zwWAya6n4e6k2peD7RjbvCscYQCRcE8eleX+LPCepaX8YrUeH5CLfVj5l1EOBt70Adn4YtnsPDWveKQgSe/V7mI45UAH+tZHw91HxBf+Hn1L+x7e/a+kLSSyPnPJ4xXplxpEL+HZdKgAjieBogB2yMV4D4U8Xaz8HtcvtA8R6bc3GkmUtDcRoWwM0AdV4F+H/iHRPiZqeqXcCW+jaijbrZJMhWPoKw/AvhLTb74u+J7KdWfT0kbFszHGfWvSPCvjC+8V3zapb2slvokUZ/1i4Z2x1xXBfD3V0i+M+vvLaXSQ3kpMMjREA/jQB6/4e8L6R4WtZbfQ7RbWKV97qp6mteiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAIbz/jxn/wCubfyrxX4AjOseKQDgm4PP417LqlwlrpVzNIGKrG2QoyeleJfAqaaHWPEsUlrcQS3EjNCZIyAeaAOt07SNL8F+LdTvtQ1BtSvNTICQ7d5i9sdq5fwQZbL486zDFF9kjuV3PAD8v5VS8A+IJPDPj/XU8a211JfXUuLeURF1IzwB6UaXrOoW/wAf7+6vdKmjM6qsJVCVK+pNAGj8RdNTwJ8SdK8ZWMBeGeTy7pP4Vz3rsfHVyniGz0vQrUK/9qFZQ4Odqjmt3xr4fh8UeD7zT7j5N8e9WxypAzXAfBDStTks57rXSZltZDHZuxzgA4oAl8d317beM/DHhnTLdJ7ZkzLbltocr0pvxD8LeIfGvhwWUfh+2trmJw0E6SAFMe9Hxu8Pay0mmeKvDMTSXuktuZV6suelGifGxtcso7GHRbpNbYBWiaMhVPrmgCj8RNAu4/gLEniPLapZ7FEiv905/Wtvwf8ADnQNW8EaZc6raC7vBEGEzMc7u1V/ixLewf
CN7K/Sa61CdlbbEhboc4rpPhvqa3vw7tfJililgg2lZU2kMBQB5xp3jaT4d+N9R0vxvprixumBivdm4BR0GfpXq3g6LRZrebU/Dlyk1peEOFQ52n+lcxZeIdE8X2l3o3izTnea3LBmkh+8PY1m/Bfw1c6DrOvPa+dHossubSKXPHPvQB69RRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAV4X44/5OP0D/cX+le6V4F42vd/7QejXaWl09vAFWSRYiVB470Aeg+O/DFpf65pmuapqrW9rp8gc2ueJfbHeuC+LN9Ffaj4c1fTLU2yJdBY7jbsY89CKn+K11qVl8S9B1O9guLjw7FtdkiUnDdyRVX4veJH1zTvD93o+mTyaXHdh2kERDA8cbaAO0+KHhJPFnw9F5gm/s4RPEy9WIwcU7wX46jvvhQdSliVbiwi8mSHvuGB0rrvDV6dV8N20ssBiVowuxxg4x3FeN23hDUdK+NEukafIX0e5/wBIuY+wB56UAdHdrceCfg5qepWmEvr0mZW9N5GBVvQYtauPBNvat4btp4ru3DSO0gJcsOtdV428NL4i8C32iwIA0kO2EehHSvIvAnxW1HwVYjwz410u732bGKC4jjLbwOgoA3vh54C1rw5pWv2XiGMLpkyvJbwrJnacVjfBHwbpmveGNWTWEN1bfbXRIHJwuDXo1hrepahoepa1qkEkFjJAwgtwMt064rjv2f7/AMjTdT026tbiC4a7eVfMjIBUnigD1jRtFsNA01LDSoBBbJ91Ac4q/RRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABTGhic5eNGPqVBp9FADXijkTY6Ky/3SMimC1t1i8tYIhH/dCDH5VLRQBxHxB8dzeBoLc2WjyX/mnlYlPyj8Kg8FLqHifVD4p1q1NowUx2kLDkIeua7x40f76K31GaUAKMKAAOwoAWoZ7O2ul23MEco9HQGpqKAGRwxwxiOKNUQdFUYFAgiVtyxID6hRT6KACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAAgEYIBHoaYsMaHKRop9QoFPooAie1t5JBJJBGzjoxQEih4oU3TeShcDOdozUtFAHkF78Utc1bW7rw7pXh+ZJGby0uGUhSvQmvS/DmjR6DocNjGc7fmY/7R5NaIijDbhGob1CjNPoAQgMCGAIPY1BHYWcMxlitYUkPVlQA1YooAa8aSffRW+ozQsaIMIiqPQDFOooAryWNrKQXgjJBznaKnVVQYRQo9AMUtFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUwwxFtxiQn1Kin0UAMkhimTZNGki+jKCKb9mg2BPIj2r0XYMCpaKAPPvHvxIufBepW1nZ6JNfrLjLRqcL+VXvA1lfX0s3iXW4fIvbxdixf3Yx0rsWijc5dFY+4zTgABgDAoAKrzWFpcOHntYZGHIZkBNWKKAE2Ls2bRt9McU1Yo0OUjVT6hQKfRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAGbr2u2vh3SJNRv1laGPqIU3N+VcN/wvbwr/z7at/4BNXpTIrrh1DD0IzUf2W3/wCeEX/fAoA85/4Xt4V/59tW/wDAJqP+F7eFf+fbVv8AwCavRvstv/zwi/74FH2W3/54Rf8AfAoA85/4Xt4V/w
CfbVv/AACaj/he3hX/AJ9tW/8AAJq9G+y2/wDzwi/74FH2W3/54Rf98CgDzRv2gfBccwhkOoJKRkRtakMfwpf+GgPBn/UR/wDAU1teOPAVvr8C32mRRW+q24zFIEHzj+6a8/0+/jmeW01K1htL+24mjeMAcdx7VyYivOjqo3RMm0dKf2gfBaqWY6gFHJJtTgU6L4++ELiMSW8epTRno6WhINc1puk3HjrUzY6fDHBpULYubryx8/8Asj1Fex6XoWm6Pp0VlY2cMcMS4UBBWlGpOpHmkrDTbOE/4Xt4V/59tW/8Amo/4Xt4V/59tW/8Amr0b7Lb/wDPCL/vgUfZbf8A54Rf98CtxnnP/C9vCv8Az7at/wCATUf8L28K/wDPtq3/AIBNXo32W3/54Rf98Cj7Lb/88Iv++BQBwul/GTw5q+pQ2Nrb6kJZm2qZLQqM+5rv6jFtApysMYPqFFSUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABVXUr+LS9OmvbgOYoV3MEXJx7CrVIQGGGAIPY0AebH46+FlYg22q8f9ObUn/C9vCv/AD7at/4BNXo32W3/AOeEX/fAo+y2/wDzwi/74FAHnP8Awvbwr/z7at/4BNR/wvbwr/z7at/4BNXo32W3/wCeEX/fAo+y2/8Azwi/74FAHnP/AAvbwr/z7at/4BNR/wAL28K/8+2rf+ATV6N9lt/+eEX/AHwKPstv/wA8Iv8AvgUAec/8L28K/wDPtq3/AIBNR/wvbwr/AM+2rf8AgE1ejfZbf/nhF/3wKPstv/zwi/74FAHnP/C9vCv/AD7at/4BNR/wvbwr/wA+2rf+ATV6N9lt/wDnhF/3wKPstv8A88Iv++BQB5z/AML28K/8+2rf+ATUf8L28K/8+2rf+ATV6N9lt/8AnhF/3wKPstv/AM8Iv++BQB5z/wAL28K/8+2rf+ATUf8AC9vCv/Ptq3/gE1ejfZbf/nhF/wB8Cj7Lb/8APCL/AL4FAHnP/C9vCv8Az7at/wCATUf8L28K/wDPtq3/AIBNXo32W3/54Rf98Cj7Lb/88Iv++BQB5z/wvbwr/wA+2rf+ATUf8L28K/8APtq3/gE1ejfZbf8A54Rf98Cj7Lb/APPCL/vgUAec/wDC9vCv/Ptq3/gE1H/C9vCv/Ptq3/gE1ejfZbf/AJ4Rf98Cj7Lb/wDPCL/vgUAecN8ePCaKWeDVFUckmzbAqJP2g/BMqb4nv3Q/xLakivSZtPs54XimtYXRwVZSg5FeP+J/Csnga+a+0+1SfQ5mzJGIwTbk+lZVZTjG8FcTubP/AA0B4M/6iP8A4CmkT9oHwXLKYojqEkg6otqSR+FcvqOsWdrYRy2cMNxNcYWCNIwSSa734e+B00WzbUtXgifVLwbpPkHyDsBWGHxE61242Qotsz/+F7eFf+fbVv8AwCaj/he3hX/n21b/AMAmr0b7Lb/88Iv++BR9lt/+eEX/AHwK7Cjzn/he3hX/AJ9tW/8AAJqP+F7eFf8An21b/wAAmr0b7Lb/APPCL/vgUfZbf/nhF/3wKAPOf+F7eFf+fbVv/AJqP+F7eFf+fbVv/AJq9G+y2/8Azwi/74FH2W3/AOeEX/fAoA85/wCF7eFf+fbVv/AJqP8Ahe3hX/n21b/wCavRvstv/wA8Iv8AvgUfZbf/AJ4Rf98CgDzn/he3hX/n21b/AMAmo/4Xt4V/59tW/wDAJq9G+y2//PCL/vgUfZbf/nhF/wB8CgDzn/he3hX/AJ9tW/8AAJqP+F7eFf8An21b/wAAmr0b7Lb/APPCL/vgUfZbf/nhF/3wKAPOf+F7eFf+fbVv/AJqP+F7eFf+fbVv/AJq9G+y2/8Azwi/74FH2W3/AOeEX/fAoA85/wCF7eFf+fbVv/AJqP8Ahe3hX/n21b/wCavRvstv/wA8Iv8AvgUfZbf/AJ4Rf98CgDzn/he3hX/n21b/AMAmo/4Xt4V/59tW/w
DAJq9G+y2//PCL/vgUfZbf/nhF/wB8CgDzn/he3hX/AJ9tW/8AAJqP+F7eFf8An21b/wAAmr0b7Lb/APPCL/vgUfZbf/nhF/3wKAOW8K/EjRvF9+9ppcN6kiruJuLcoMfU111MSGKM5jjRT6qoFPoAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDk/FXxG0bwfeR22qRXru67gbeAuPzFYH/C9vCv/Ptq3/gE1ekPDFIcyRo5/wBpQab9lt/+eEX/AHwKAPOf+F7eFf8An21b/wAAmo/4Xt4V/wCfbVv/AACavRvstv8A88Iv++BR9lt/+eEX/fAoA85/4Xt4V/59tW/8Amo/4Xt4V/59tW/8Amr0b7Lb/wDPCL/vgUfZbf8A54Rf98CgDzn/AIXt4V/59tW/8Amo/wCF7eFf+fbVv/AJq9G+y2//ADwi/wC+BR9lt/8AnhF/3wKAPNP+GgvBQlMJa/Eq8mM2p3D8KX/hoDwZ/wBRH/wFNXvHXgFb2T+2/D0EMepxDLpsGJ1HY1xmn6nZ3lvI1xBDbTwcTxSRgFCOv4Vx4jETo68t0TJtHSN+0H4JjQvI1+iDqzWpAFPT49eEpUDxQ6pIh5DLZkg1z+geHZfH2oB3t0t9Cgb5mMYBuCO30r2K10uxsrWO2trSGOKNQqqEHArelOc480lYauzz/wD4Xt4V/wCfbVv/AACaj/he3hX/AJ9tW/8AAJq9G+y2/wDzwi/74FH2W3/54Rf98CtRnnP/AAvbwr/z7at/4BNR/wAL28K/8+2rf+ATV6N9lt/+eEX/AHwKPstv/wA8Iv8AvgUAec/8L28K/wDPtq3/AIBNR/wvbwr/AM+2rf8AgE1ejfZbf/nhF/3wKPstv/zwi/74FAHntv8AHDwxc3EcMdtqgZ2CjNmwFeiRSLNCkqZ2uoYZ64NNFrbg5EEf/fAqXp0oAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArkPGPw80/wAWywzmVrO6Q4aaIcundTXX0Umk9GBS0nSbPRNNisdPhWKGIYAA6+5q7RRTAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKo6nrOn6PAZtSu4oFxkB2ALfQd6AL1FebX3xk06SV7XQLG6vbodC0RCH8azm8f/EG4407wrauw6+ZKRx+dUot7ITaR61RXkq+PPiNbc6j4UtEU/d8uUn+tXLX4xRWTrB4o0u5tJmOAYYy6j6mhxa3QKSZ6dRWXo3iTStehD6beRykjJj3DcPqK1KkYUUUUAFFFFABRRRQAUUUUAFFFFABUc8EV1A8NwiyRuMMrDIIqSigDiNB+F2k6F4mm1aN3mGc28D8rBnriu3oopJJbAFFFFMAooooAKKKKACiiigAooooAKKKKACis7Vte0zRIDLqV3FDgZCsw3H6CuEvPjHaXUjW3hrTrm8uVOMyxlU/OmlcD0yivJW8ffES4/5B/hS0cDr5kpH9aF8ffES3/wCQh4VtEz08uUn+tPkl2J5l3PWqK8ztPjJZ2zrb+I9OurO4J6xxlkH413ek69pmtwCTTbyKfIyVVhuX6iptYo0aKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArivE/wy0vxJrMGoGR7Vgw+0LEMCdfQ12tFJpPcCCzs7fT7SO1s4lihjXaqqOgqeiimAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFQi8tjcfZxPGZv8AnnuG78qAJqKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiii
gAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAoorg/Hfiq5jnTw74fbOo3PEki/8ALBT3ppX0Buw/xV4+NrdHR/DUYvtUfjI5SP6n1rmrXwRJqE4vfGV9Jqk+dywsSFiPoK29B0C10Cz8uFd9w/M0zcl27mtOuuFJR1Zzym3sRW1pbWcIitbeONB0AUVNn04+lJRWpkLk9+frTJYop4mjmhjdW6gqKdRQByt/4FtxMbzw1cyaRfdfMiJ+b2xWr4c8eXen3sejeNIxb3B+WK6/hk9ye1atU9V0mz1uya1v4hIpHynup7HNZTpqWxpGbR3asHUMpBUjII70tea+C/EN3oOrf8It4glMgJ/0G5f/AJaD+7+FelVyNNOzOhO+oUUUUhhRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAhIVSWOAOSTXBeKPH0xuzo3hGIXuoNw0o+5EPXPrUfjrxLc3d+nhjw8+bqb/j4mT/AJZL3Bp2i6JZ6BYi3skG5uZJDyWPfmtadPm1exnOfKYll4HS5uBfeK7uTVrs8gSEgR+wrqILeC1iEVvBHGi9AFFPorrSS2OdtvcXJ7cfSjJ78/WkopiGTwQXMRiuII3RuoKiuWvPAsdtObzwndyaRd53N5ZJEh9DXWUUmk9xptbGf4Y8e3Ed8ui+L4hZ33SKb+CUe59a9ABBAIOQehrgNZ0Sz16xa2vUG7rHIOCjdjmo/A/ia5sNQPhbxHIftMYzazt/y1T3PrXJUp8uq2OiE+bRnodFFFZGgUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBxfjnxNdWl9ZeG9FIGq6mCY2P8CDqam8MeAotBvBf3V/Nf3xB3SyH1rnPFagftA+EWHU2k39a9RoAKKKKACiiigAooooAKK5Dxl8S9D8D3dta6sLiS4uVLRx28e8kDvXOf8AC/vDH/Pjq3/gKalzitG0NJvZHqVFeW/8L+8Mf8+Grf8AgIa67wZ460jxzYz3OimUC3fy5UmTayn6UKUXs7g01udJRRRVCCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiijOelABRRRQBleJtdg8N+HrrVLnlYEyF7sfSvO/A+mTrbT65qZMl7qLGRXbqIz0FT/Fa7fUda0TQbVtwkn33SD+5xXQpElvEkEfCRLtX6V0UY/aMaj6C0UUV0mAUUUUAFFFFABRRRQBheMNEfWdEL2Z8vULX95bzDquOTXT+AvEqeJvDENw3FxD+5mU9dy8E/jVYcnB6Hg1yvguX+wfitq2nyHZaXqKbZe27vXPWjpzG1N9D1miiiuY3CiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKOtFABWF4x8QxeGfDVxfynD42RD1c9K3a8p+JM51rxzovhtTugYGabHRSDxmmld2E3ZXHeCtIlstOl1PUBnUdRbzJye3pXSUp4AX+6Av5Uld6VlZHI3d3CiiimIKKKKACiiigArnPGukve6SNQsspe6efOV16sB/DXR0qhWO1xlTwQe4pNXVmNOzuaXg3xCviXwxa3/AmZMSp3VvQ1u15R8NpzoXjjWdBnYj7ZIbmBD2UZ6V6vXA1Z2OtO6uFFFFIYUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHl/iz/kv/AIQ/69Jv616hXl/iz/kv/hD/AK9Jv6
16hQAUUUUAFFFFABRRRQB418QgP+F6+HMgH/QZOCM9zW0VTJ/dp/3yKxfiF/yXXw5/14yfzNbR6mvzDiv/AH9f4V+p9Fli/c/MVFTzB+7Tr/dFYHwg1jS9K1vxguo39taM2ogqksgTIweRmt9Pvj614NceDv7b8Z69rV2jPZW98ItqkjLnpn2ro4R/3mp/h/UzzRfu4+p9Uf8ACZ+Gv+g9p/8A4EL/AI0f8Jn4a/6D2n/+BC/415Jpuj+CtPnhsPGPhKC2lkAMd3EWaFl9S2eK7u1+FXw8vrdZ7PRLOeJujxuxB/Wv0c8E6D/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xr5e8b/FvxHoPxIu20PWzcWCt8kaNlCK+hZPhF4DjjZ28PW+FGTgt/jXx58Rk0qLxxfQ6DD5NpG5VUz0IoA9j8OftSz+YkXiLTUCDrLDkk16z4b+M/g7xKwW11EQOe1x8nNfDVOQ4bv8AgaAPr2W7g1f48XgtJkuIoLFHDo2VzgV2pOTmvAv2fkurTxXqH9o7sy2w2FjkkV75XZS+A5qnxBRRRWpmFFFFABRRRQAUUUUAFcP4puoNJ+KfhG7uZkghlkYSSOcAfU13FeO/tAWM2qy+HLG0QySvIflHp3rOr8DLh8R7ufGfhoHB13T/APwIX/Gj/hM/DX/Qe0//AMCF/wAa8d8G6L4Ge1trHxL4bgjlb5FvNxMbt3BOeDXo6/CHwGyhl8P2xBGQQzc/rXEdRt/8Jn4a/wCg9p//AIEL/jR/wmfhr/oPaf8A+BC/41i/8Kf8Cf8AQvW/5t/jR/wp/wACf9C9b/m3+NAG1/wmfhr/AKD2n/8AgQv+NH/CZ+Gv+g9p/wD4EL/jWL/wp/wJ/wBC9b/m3+NH/Cn/AAJ/0L1v+bf40AbX/CZ+Gv8AoPaf/wCBC/40f8Jn4a/6D2n/APgQv+NYv/Cn/An/AEL1v+bf40f8Kf8AAn/QvW/5t/jQBtf8Jn4a/wCg9p//AIEL/jR/wmfhr/oPaf8A+BC/41i/8Kf8Cf8AQvW/5t/jR/wp/wACf9
C9b/m3+NAG1/wmfhr/AKD2n/8AgQv+NH/CZ+Gv+g9p/wD4EL/jWL/wp/wJ/wBC9b/m3+NH/Cn/AAJ/0L1v+bf40AbX/CZ+Gv8AoPaf/wCBC/40f8Jn4a/6D2n/APgQv+NYv/Cn/An/AEL1v+bf40f8Kf8AAn/QvW/5t/jQBtf8Jn4a/wCg9p//AIEL/jR/wmfhr/oPaf8A+BC/41i/8Kf8Cf8AQvW/5t/jR/wp/wACf9C9b/m3+NAG1/wmfhr/AKD2n/8AgQv+NR3HjPw39ml269p4Ow4P2hfT61k/8Kf8Cf8AQvW/5t/jUdx8IfAqWsrDw9bghCeGb0+tAHzbf/GbxX4f8a3zWWrG8s1lOyJmymPau+8NftSxSSKnibTRCo4LwZOa8G8W2Edv4v1C10+ErFFKQqLzgVmiwZbdZ5nVYycEZ+YfhQB9x+HPi14Q8TKDZapHEx6LOQhrkbS4i1T4u6ncW8iyx2x2h1OR+FfJwuLazn3WoaXjhmJGDXvHwCklVLh7lmd7v5lZuuK0pfGiJ/Ce2nqaSg9aK7TlCiiigAooooAKKKKACiiigDjrmWK0+PGkXDOqM9mY8E9c17DXzn8V9NvdQ8e6ZLpErxXlpb+cWTrtHpXb+Ffibd21jbL4pt28mUARXcYzj/rp6GuKp8bOqHwo9VoqG0vLe+t1uLOZJ4m+66HINTVmWFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQB5f4s/wCS/wDhD/r0m/rXqFeX+LP+S/8AhD/r0m/rXqFABRRRQAUUUUAFFFFAHlfxJ8HeKtS8c6T4h8JxWk7Wdu0Tx3D7c5NY39lfFr/oE6R/3+r22ivPxOW4PFT9pXpqT2ub069WmrQdkeJjS/i0rA/2TpHH/Tatv4c+ANYsNI8RReMY7dZdZn8zZA24IMHpXqNFVhsvwmEk5UIKLYqlepUVpu55p4Z8qG6ufAniqFLiNMmyaQf62Lnv61JdeC9c8K3JvfA94ZIAf+QbO37sD2rY8f8AhuTVNPTU9MJi1OwPmxSJ95lHJT8a0fCHiSPxNoUd1wlynyXEXeN/Q13GJk+H/iRYX85sNZjbS7+P5XWcbUZv9knrXZqwZQykEEZBHesfxB4U0jxLb+XqlqkjqP3cvRkPqDXFtaeLvALtJaStrmjr8ziU5ljHoooA9NornfDnjfR/Ekai2mMFyetrP8sg/CuioAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKbJIkUbSSsERRksxwAK4fXPiREt0dN8K27apqDcKyDMSn3agDsb/ULPTbVp9QuI7eEdWkbAr4y+MOhWTeJrjWfDVpcnTpmJklZPk39yD6V9JWPgDUdeuhqPjq9afdydNRswrW74s8FWXiHwPc+HoI0tonj2xbR9w9qAPgOrOnWcmoalb2kIy80gUCtnxd4L1Pwfrk+nahHuaLJ3JyNvYmui+B/h99d+JunnZvitHEsg9qAPb9Q0ePwX4i8N3aLhNQgS2fj7rACvQnADnByPWo/iX4dk1rwsZLJN15YnzbdfcVl+F9Xj1rw9BPG2ZIh5Uw9HHWuqjLSxhVXU1qKKK3MQooooAKKKKACiiigBRyRXGwWq+JPjAtsQHTSYyX9twrqNT1CDSdKnvrptsUSnn37VU+FGjTra3niLUEKXmpOeD/AHAeKxrStGxrTWtzM0PTLS21/VfA+sxB7F2M9k7ddzcnB9q0LbUtV+Hl2tjrRlv9GdsQXYGXj9m9hUvxQsJLGOy8V2Kk3GlSBmRf+Wik45rsbSW08Q6DDLIiTQXUQLKeRyORXIdBbtLy3v7VLi0lWWJxlXU5BqavOLrTNU+Ht4+oaJ5l7ojnM9n1MI9UFd
roevWHiCwW60+UMCPmQ/eQ+hFAGlRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABTZEEkTIejAg06q99f22m2b3V9MkMKDJZzgUAfIvxv8Ban4Q8TyatYoxsLsk+cozg+9ePs7OxZjknrX2drkF58XbebSYYTaaBn57lxh3YdNvtXzJ4++GmseCvEE9m9tLPagkxTqpIK0AcbEnmTIg/iYD9a+nfC2m/8ACI6r4VgYYgvrUszdlPpXzbo0Rn12xiAyXuEXH/AhX2f4/wDC883gOym0xB9r0xUcEf3APmqouzuJq6sbB60lUND1aDXNEtr+2PyuuCO4I4NX67zkCiiigQUUUUAFFFFABSqu5gB3pKo63qkei6Jc30jBWRD5YP8AE3YUDMDw/F/wkPxnk1FQHt9OtmtpF7biKuXtpD4M8VS2moQrP4c1pvnDjIjlPQD2rU+FGhSafoE+q3SlLnV5PtEit1XrxXUeI9Ct/EWiT2FyoJdSY3PVG7EVwSd3c64qyscZdeFtb8HznUPBM32myPLadK3yKvqtdB4Z8dab4gJt33Wd+h2vbT/KxPt6is74f67cKZ/DWtMRqGnnYjP1mQfxVqeJvBGneIT9oXNnqKD93eRcOtSM6WivNrXxPrvgmdLLxhC11ZE7Yb6Ibjj1f0r0Cx1C01O1W5sLhJ4m6OhyKALNFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHl/iz/kv/hD/AK9Jv616hXl/iz/kv/hD/r0m/rXqFABRRRQAUUUUAFFFFABRRRQAUUUUAFea63DJ4A8YJrtoh/sjUH2XkafwyHo30r0qqeq6Zb6xpk9jdrmOZCpOORnuKALMM0dxCksLh43GVZTwRT68+8DancaFq83g/WGO+ElrGQ/8tIueteg0Acp4j+H+l65Kby23adqX8N5Bw1c/H4k8S+BnW38VWr6hpoO2O7txuk+rV6XTZI0ljaOVQ6MMFSOCKAKGka9puu2wm0y7jnGMsqtkp7Edq0a4LWPhtHFcNqHhG6fSbsfMYojhJj/tVDp/xAvtGvV0zxzYm1nJ+W6iGYiPUmgD0OiobS8tr+2W4sp0nhbo6HINTUAFFFFABRRRQAUUUUAFFFFABRRRQAUUVk674m0vw7bGXUrlEfGViB+d/YCgDWrlvEfj7SfD58kM17eE4FtbfMwPuO1c42p+KvH7NFpEb6NozHBuX+Wb8BXUeG/A2k+HSJ0j+1X5GHvJuXegDmItD8V+OXW48Q3TaRp3VLa2PzSr6NXcaJ4d0vw7a/Z9JtEt1PLFRyx9Sa06KACsTxb4jh8M6FLePh5j8sEXeRvQVsyypDE0krBEUZZj0ArzbS45PiD4ybV7hT/Y2mSbbVG/jkHU/SgC54X8CWt9ok934rtkvb3Usu5lGSiMOF/CovAXwi03wB4n1DU9MmZ47tNqo3/LPnoK9EooACAQQehryTUtPk+HniZ7mJCdD1B8tjpFIepPpXrdVdS0621bTprK+iEsEy7WU1UZOLuhNXVjmEdJYlliYPG4yrDoRS1yd1a6r8Obry5RLqOhSN+7kAy8PsfYV0enanY6vAJtMuY7hcZIQ5K+xrsjNSWhyyi4lmiiirJCiiigApQM59ByTUN5d22n25nv50t4h/FIcCuXF3qvjq8On+Hle10wHE96wwWH+z61MpKK1KUW9hJopfiB4kXSbPJ0ezcNdzD7shB6A161bwR2trFbwqFjiQIoHYAVR0HQrLw7pUdhp0YSNOWPdz3JrSrilJyd2dMVZWIL20ivrKW2nQOkilSDXAfDa7l0bV9U8H3jlmsX82KRv41Y9B9K9Grzn4kWz6Hqum+LrMFRZyBblV/5aKTgZqSj0YgMCCAQeoNcJrnhC80i/bXPBz
+TcL80tnnEcw7/AI12llcpe2MFzGQVljVxj3GanoA5zwt4xs/EcJiYG2v4+JbaXhge+B6V0dcp4o8Fx6vKNR0qU2Oqx8rPHxv9jVfw340ka8OieJ4/sWqRfKGbhJvdT60AdnRRRQAUUUUAFFFFABRRRQAUUVy/ijxpb6Hizsk+2anJ8sdvHyVPq3oKANLxB4k0/wAOWRuL+UbiP3cK/fkPoBXIWWjap49vU1LxKGttJQ5t7EceaPVxV3w/4LuLm+GueMJPteoMd0cBOY7f2FdwBgYHSgCO3t4bS3SC2jWOJBhVUcAV578UbganPp3he1RWvL9w+7GSqA8ivQridLa2lnlOEiQux9gM1514EgfxN4q1HxZeDfDvMenk/wAKdDQBl6h+z74fk1yx1XSibSW2ZWaJfuuR3r1kwq9t5MgDIU2sD3GMVJRQB5HcWknw98TypICdE1F8o3aBs9K6wEFQynKsMgjvXQ6zo9nruly2OoRiSKQd+qnsR715jL/a3w9uPs+qiS/0UtiO5UZdB/tV0U6ltGYzh1R1tFQWN/aapbifTrhLiM90OcVPXSYhRRRQIKKKgvr600y3M+o3EdvGBkFzjP0oGT8BSzEKqjLMegFclHZy/EXxMlvEGXQ9NkDyS/8APSQdAPUUsLar8Qbo2mlJJZaKjYnuGGGk9l9RXp2j6PaaHpkVjYRhIoxj3Pua5qlS+iNoQ6suoixoqIAqqMADtS0UVzmxwvxB0O5jaHxPoo26hp4zJt6vEOorpvDuu23iLQ7fUbU4Eq5ZD1Q+hrTdFkQo4DKwwQe9eaoz/DvxuyOT/YWrPkMf+WUp6Ae1AHo11awXts9vdxLLFIMMjDIIrgL/AMFar4Zu21PwPcsFBy2myH90R7e9eiAggEHIPeigDkfDfxAsNXk+xaip07UUO1oZ/l3t/s56111YHiTwbpfiWPddReVdqP3V1Hw8Z9RXJx674h+H8qW3iOKTU9JztivIhukH+9QB6XRVLStYsdas1udNuEnjI52nO0+hq7QAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHl/iz/AJL/AOEP+vSb+teoV5f4s/5L/wCEP+vSb+teoUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAch4/8Nyapp8epaZmPU7A+bHIn3nUclPxrR8IeJI/E2hR3XCXKfJcRd439DW9XmutQyeAfGCa7aIf7I1B9l3Gn8Mh6N9KAPSqKZDNHcQpNA4eNxlWU8EU+gAqrqOmWerWbWuo26XEL9UccVaooA84u/BOt+GLo33ge8LRg86dO37sD2rS0D4kWV7N9h1yJ9Lv0+VhONqO3+ya7WsfX/C2k+JINmp2qSOo+SXHzIfUGgDXVgyhlIIIyCO9LXmbWPizwC7SafM2taODudJTmVB6KK6nw7440jxGipBKbe6PW1n+WQfhQB0dFFFABRRRQAUUU2SRIYmklYIijLMTgAUAOqtf6lZ6XbG41C5jt4h1eRsCuN1r4jxm6OmeFLZtTvm4WRBmJT7mq9h4A1DW7pdQ8c3rXDk5Ono2YVoAju/HOteJrlrHwNYEqCRLd3I2pj1U960NC+G1pbXAvvEVxJrF9ncGuOREfRa7G0s7ewtUtrOFIYUGFRBgCpqAEVQqgKAAOgFLRRQAUUVheLvEkXhnQpLs4e4b5beLvI/oKAOc8c6rc6zqkPhDRXPnTkG9kX/lnFx0967PSNKttF0uGxs1CxxKFz3Y+prm/AHhuTTbGTVdUzJqeoHzZHf7yKeQn4V2NABRRRQAUUUUAMmhjuIWinQSRuMMrDIIrz3W/hXEs7X3hK7fS7gfN9njOI5D716LRTTsB47LqHjzw8ude0qC6gHRrT5mIqGP4qWRfZN4d1iMjgs0JxX
tFMkijlQpKiup6hhmtFVkjN04njcvxU0+NsJoGrzD+9HCSKkh17xl4gGPDOirAD/FfDbgV6/FbwwJthiRB6KMVJQ6smCpxPM9J+GF3qNwt741v3umPJsUbMQr0WysrbTrRLWxhSCGMYVEGAKnorNtvctKwUUUUhhVPV9Og1bSbiyuYxJHKhGD69quUUAef/C7UZreG98M6k5a902Qn5v7hPFegV5r42ifwv420vxRbDy7SR/L1Fh3XoK9HhlS4gjmiOUkUMp9QRmgB9YviTwvY+JbMR3S7J4+YbhfvRn2raooA890zxHqPhDUI9G8W7pLVztttQHIx/tntXoEciTRLJEwdGGVYHIIqtqel2esWElnqEKzQuMFWHT3rgc6v8N7n5zLqWgM3J6yQ/wD1hQB6TRVXTdStNWsY7uwmWaGQZDKc49qtUAFFFFABQSFBJOAOSahu7u3sbV7i8lWGFBlnc4Arz671fVviBePp3h8yWOkI2Li9PDP7J6igC7r3jK71G/Oh+Doxc3bfLJc/8s4h359a1PC/gy10AG6uHa81KXmW6l5bJ7CtPQvD9h4esBa6fEFH8b/xOfU1p0AFFFNkkWKJ5HOFRSxPoBQBw3xP1i4i0230PS2/4mGpOFVR3jzhq6nw/o9voOhW2nWi7Y4U6e561w3hWNvF3xBv/EV0C1rp7GLT27EHrXpdABRRRQAVFc20N5bvBdRrLE4wyMMgipaKAPN9Z+FrW1y194OvX0+Uci0BxExrFm1jxv4eX/io9HjuIh0ayG4kV7FRVxnKOxLinueLxfFSyeTZN4f1eE93eEhaJPipYq+yLw9q8pPRkhJGa9llginjKTRq6nswzRHDFCm2KNUX0UYq/bSJ9nE8gi1jxx4gXHhzR4rdD1a9G0gVs6P8LGurhb3xhfSahJ1+yE5iU16TRUSnKW5SilsRW1rBZ2yW9rEsUSDCoowAKlooqCgooooAKyfEug2/iPQ57C4UbmXMb90fsRWtRQBw/wAP9enIn8N6yxGo6cdil/vTIP4q7iuF+IGh3MMsPinRBtv7D/W7eskQ6iun8P65beIdFg1C0PyyrlkPVD6GgDTpssSTRNHKgdGGGVhkEU6igDz/AFXwJe6PetqvgW4+yz53NYscRSH1NXPDnxAhvJ/7N8QxHTNTT5WWT5Ukb/ZPeu0rF8ReFNM8TWvl38I85R+6nX78Z9RQBtdelFeaLeeJfh5KE1ESaxomcCcfNMn1HpXc6Lr+na/aCfTLlJhjLKD8yexHagDSooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA8v8Wf8AJf8Awh/16Tf1r1CvL/Fn/Jf/AAh/16Tf1r1CgAooooAKKKKACiiigAooooAKKKKACiiigAqnq2mW+saXPY3a5imQqfUZ7irlFAHn3gbU7jQtXm8H6w3zwktYyH/lpF7+9eg1yHj/AMNyapp8ep6ZmPU7A+bG6fecDnZ+NaPhDxJH4m0KO54S5T5LiLvG/oaAN6iiigAooooAK5XxF8P9K12Q3UG7T9Q6i7t+HrqqKAPNIvEPibwM6W/ia1fUtNB2x3UA3SfVq7rR9f03XrYTaZdRzgDLKrcp7EVfkjSWNo5VDIwwVI4IrhdZ+G0QuDqHhS6k0m7U7vKhOI5T/tUAd5RXlkvxabwbMdO+IcAt7sLuSWAZRx/jT4Na8RfEuLf4fcadoMhwbrOJj9KAOm8R/EDSdAPkRlr69JwLa2+ZwfcVz0WgeKfHDrceI7ptK0/70dtbH5pF9GrqPDngfSfDmJoo/tN7/Hdzcu1dHQBm6L4e0zw/a+RpVpHAp+8VHLH1NaVFFABRRRQAUUUUAMmmjghaWZwkaDLMxwAK840eKT4geMn1m6Rv7H059tpG/wDFIP4vpVnxzq
dxrurQ+D9Gb55sNfSD+CL2967XSdMt9H0uCxtFxFCgUepx3NAFyiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAMvxJo0Gv+HrrT7lN6SISB/tDkfrXNfDDWZ7nRZdH1R/+JlpzlJEPUJnC/pXc15p4hRvB/wASrTXYgVstUPlXr9lx0oA9LopqOskauhyrAEH1FOoAKbLEk0TRyqHRxhlI4Ip1FAHnmpeHdT8HX0mreEyZbJjun08/dUdyorqvDfiew8TWPnWbFJV4lgfh4z7itmuL8SeDJ/t/9t+FZfsmppy8Y4Sf/eoA7Ss3XNfsPD9i1zqEwQY+VM/M59AK4tfizbQwPp93aSjXk+T7GBy7eo9qt6H4NvNUvhrfjNxPdMd0VmDmOEdvxoAp2umar8Q7xb/Ww9loiHMFmOGmHq4r0G0s7ewtUtrOJYoYxhUUcCpgAoAUYA4AFFABRRRQAVxnxM16bSvDn2TT/mvr5xCiDqVPDGuyJCqSxwByTXmmlg+NPindahLk2WhnyoP7shPegDs/Cegw+HPDVrp0HOxdzE9Sx5NbNFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAI6LIhRwGVhgg9681iZ/h545aJiRoerPuDN/yzlPRR7V6XWR4n0CDxHoU9jOo3sMxOeqP2IoA1wQRkcg0VxPw/1+eRJvDusMV1HTj5YL9ZkH8VdtQAUUUUAI6LIhR1DKwwQe9cJrPw9a2um1TwbOdOvQdxgU4jmP8AtV3lFAHDaH8QgLsaX4sg/s3UF4MjDETn/ZNdwrK6BkIKkZBHeszXPDmmeIrTyNUtklwPkcj5kPqDXDlPE3w7kLRmTWdDzk55mj9gPSgD0yisjQPE2meJLQTadOGfGXhJ+eP2IrXoAKKKKACiiigAooooAKKKKACiiuU8VfEbw/4SiP265E04ODbwHc/5UCbS1Z1dFeGar8ddWvn/AOKT0hdnrefLWU/xU+IE7b5LeyhPTah4qHUgupxVMwwlN2lUVz6Ior53T4rfEG2yY7Wxnz1Eh6Vt6T8ebizTb4s0l1k9bRdwoVSL2Y6ePwtV2hUTZ7bRXP8Ahrxroniq2STTLtDKwyYGOHX6iugqzt3CiiigAooooAKKKKACiiuY8Xp4sKwP4RNsXU5kW4OARQB09Fcn4S8Xy6tcTaVrUItdXtR+9Qfdb3WusoAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAPL/Fn/Jf/CH/AF6Tf1r1CvL/ABZ/yX/wh/16Tf1r1CgAooooAKKKKACiiigAooooAKKKKACiiigAooooAK811qGTwB4wTXLRCdJ1B9l3En8Mh/jPtXpVU9W0y31jS57G8XdFMhU+oz3FAFmGaO4hSaBw8bjKsp4Ip9efeBtTudD1abwfrLfPDlrFz/HF7+9eg0AFFFFABRTZZUhiaSVwiKMszHAArhdZ+I6SXR03wlbNqd63CzIMwof9o0AdlqOp2Wk2pudSuY7aEfxyHArgrrxvrfii5ax8EWBVASJLy5GEI9VNSad8Pr7WbtdS8dXrXUpOTYI2YFrvbS0t7G1S3s4UhhQYVEGAKAOM0r4Y6ftabxQ51u5k5P2oblQ+gqne+Cda8N3Z1DwPenYOunTHEQHtXotFAHE6D8SLK7m+w6/E+lXyfKRcDakjf7JrtVYMoZSCCMgjvWTr3hfSfEcGzVLRJXUfu5MfMh9Qa4l9P8WeAWaXTJ31rSAd0kcxzKo9FoA9NornPDnjnSPEQWOKU214etpP8sg/CujoAKKKKACsHxf4kj8M6FJdcNcv8lvF3kf0FbU88VtA807hI0G5mboBXnWjQSePvGDa7dof7I099lpE/wDFIP4xQBs+APDcml6fJqep5k1O/PmyO/3kB52fhXYUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFYHjXw+
niXwpd6exwzDehHUEcit+igDkfhx4gfWvDKQ3nyXtmTDLGeoC8A111eZXGfBXxYW5/5c9fwj/3YmFemgggEHIPSgAooqO4uIbS3ee5kWKJBlnY4AFAEnTrXE+IvGlxNff2J4RiF5qMnytN1jh9cn1qhf67qvjm9fS/C+6201Di4vzxuH+wa67w94asPDdl5FkmZG/1szfekPqaAOSX4VQSWZvLi9lbXj84v/4lb0HtVrQvGN3pt+ND8Yx/Z7pflju/+Wco7c+td1WZrugWHiHT2tdQiDD+Bx95D6igDTBDAEHIPINFec2er6r4AvU03xAZL3SXOLe9HLRj0evQba6gvLZLi1lWWJxlXU5BoAlooo6UAcp8RvER8PeEppIRvuLgiCNB1+bjI+lWfA3h4eHPC1taMd87DfLIerE881yuT41+LDKebHQPldD92Vj0NemAAAAcAUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHCeP9Fuba4h8VaIMX1jjzgOskXcV1Oga3beINFt9QtD8sqglT1Q+hrQdFljZJFDKwwQe4rzaBn+HfjdoHJ/sPVX3Kzf8spT0Ue1AHpdFAIIBByDRQAUUUUAFBAIweRRRQBxHiD4fJLcnU/DE7aZqKncREcJMf9qodH8fXNhfDSfG1v8AYbscC6A/cv8Aj613tZ+saHp+vWRtdUtknj6jcOVPqKAL0ciTRLJEwdGGVYHIIp1eaSaX4j+H0jXGkSyato4O6WCU5kjHotdd4d8X6V4lhBspglxjL20nEifUUAbtFFFABRRRQAVHcXEVrbvPcSLHFGMs7HAAqSvC/ij42k8Q6i3h7RZyLOE/6VKh4f1WplJRV2Y168MPTdSb0RL44+Kl3rMsuk+EHMUA+WW96H/gNcBDp0STG4uXa7uj96eU5Y1Yhhjt4VigUKi9AKfXDOo5n5/js0rYuVr2j2/zFyaSiisjyQozxggEfSiigCmbAw3H2rSriTT7vr5sJwTXqXgP4sN5kWjeLiIrjhY7r+F/qfWvOahurWK9gMUwyOoPcGtoVXH0PZwGbVsLJRk7x7f5H1QjrIiuhDKwyCO4pa8i+E3juQyjwxrs3+kJxbSuf9YPSvXa7k01dH39KrCrBVIO6YUUUUzQKKKKACiiigDzHxq39n/GDwhJb/I19K0cpH8QHrXp1eX/ABD/AOSteAv+vl69QoAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAPL/Fn/Jf/CH/AF6Tf1r1CvL/ABZ/yX/wh/16Tf1r1CgAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAOP8f8AhuTVNPj1LTMx6nYHzYnT7zgc7PxrS8IeJI/E2hR3PC3MfyXMXeN/St6vJvFurWXwq8WDXTMP7P1A4uLOM/OXP8QFAHrNcp4j+IOlaC5t4d2oX2dv2a2+ZgfeucGo+KPiIAdJB0jQpQMztxMw9RXWeHPBGk+G1WSCL7RefxXc3MjfU0ActF4d8UeN5FuPE122maf96K2tThnX0eu60bQNM0C18jSrSO3U/e2jlj6mtGigAooooAKKKKACiiigDlvEfgDStedrmIGw1A8i8t+HrnI9f8T+BZEt/Edq+qaYPljubcbpAPVq9MpskaSxskihkYYIPQigDP0bxBpmv23naXdxzgD5lU8p7EVpVwmtfDWA3H2/wrcvpF2p3eXCcRyt/tVg6r8U9U8FafLa+M7HZfEbbe4iH7pz2yaANjxvqVxr+sweENHY5lw19IP4Y/T6122l6bb6RpkFjZrtihQKPf3rlvhvo8MGjHWHuFu7zUT5skwOdoP8I+ldpQAUUUUAFFFFABRRRQAUUUUAFFNkkSKNpJWCIoyzE8AVwWv/ABTsrWc2Ph2B9Vu2+UPCN0aN/tGi1wO/pGZUUs5CgdSa8gl/4WF4iTbqlzBptuehtThsVXT4a3Jffc
eMtXlB+9Gz8VqqUmQ6kUezJIkq7o2DL6g5p1eMSfDSUnNv4v1a3H91HOKlhsPHnh0E6HqSamo4xetnIodKSEqkT2KivN9F+KRhuV0/xhZPp9x0a5A/cn8a9DtrqC9t0ntJVmicZV0OQazatuaXuS0UUUgCiiigAooooA5j4geHj4h8KzwwER3MP72KQdQV5wKd4B8QjxJ4Tt7lhslizDIp65XjNdIQGUhhkEYIrx/Udfi+F/xAuYPLe5h1rH2W0h5KP649KAPUtY1qx0Kwa71KdYo1HGTyx9BXDQ2ur/Ea6FzfiTT9BRv3UHR5v972q1pHhC+16/XWvGjiVid0NiD+7Qdsj1rvVVUQKgAVRgAdhQBBYWFrplklpYQrDBGMKijirFFFABRRRQBBe2VvqFo9teRLLDIMMjDg15/cWOq/Dq7a80vzb/Q5GzNbdXh/3favR6RlDqVYAqRgg96AKGja3Y69YLd6dMsiEfMAeVPoazfHPiBPDfhS5vD99/3MY77m4FYmseEb7QtQbW/Br+W4O6axP+rkHfA9a5uDX0+J3jmxsPKeC10wFr20m4JcdDigDtvh34fbQ/DERu/mvbn97PIerZ5Ga6ukVQihVGABgCloAKKKKACiiigAoopk00dvC807rHGgyzMcACgB9Feea78U4FuW0/wvavqV0eFnQZiU+5rBmt/H/iJcazfRaZEfu/Y2wwFXGEpbEuSW56+7pGhaRgqjuTSo6yKGRgwPcGvGI/hrcb91z4w1acHrGz8UP8NJ92638Y6tCByEV+Kr2MifaRPaKK8eit/iB4eUnRr6HUox94XjZOK3dD+KkDXK2Hii1k0y56GZxiJj7GocJR3KUk9j0SimQTxXMCTQSLJG4yrKcgin1JQUUUUAFFFFABRRRQAVkeJ9Ag8R6FPYzKN7DMTnrG/YiteigDifh9r80sEvh/V2K6jpx8v5/vSoP4q7auD8faLc2d1D4r0UYvLL/j4Uf8tIu4rq9C1q21/R4NQtD8kqgle6n0NAGjRRRQAUUUUAFFFFABXHeIvh9Z6lOdQ0eRtM1NTu86DjzD6N7V2NFAHnmm+OtQ0K+Gk+OLUwSAgJfRj90R7n1rstQ1yx0/QJtYlnRrOKMyeYp4I9qn1LTLLV7NrXUrdLiFuqOM18nfGvWz4d1CTwp4f1aaawYbpoi2RGT/AKAPY/hb8abbx1rN9pd4qQXEbk2/YSJnj8a9Yr869E1m88P6xb6lp0hjngcMpB6+1fcfw08eWfjzwpBfQuBdIoWePPIYdTQBF8U/Fo8K+E38liLy9Jht8dmPevCNOtTa2p38zTHzJT6setdZ8YtTfVfiNbaNndBZxCfPYNXPMcsTXHXld8p8ZxDiXKpGgtlqxKKKK5j5cKKKKACiiigAooooAp36zxGHULA7b2zYPCw7etfR3gfxLB4r8KWuowNkldkmf74HNfPvUEeoxXbfAjVWttS1Xw43EcH75Pck811UJfZPreHsS7yw79V+p7bRRRXWfXBRRRQAUUUUAeX/EP/krXgL/r5evUK8v+If8AyVrwF/18vXqFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQB5f4s/5L/4Q/69Jv616hXl/iz/AJL/AOEP+vSb+teoUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFNlljgiaWZwiIMszHAAoAdVTUtUsdItDc6lcx20I/jkOBXG6x8RxNdnTPCFq2p3jcCdBmJD7motO+H15q92upeOLxruYnJskP7kfhQBDdeNtd8U3DWXgexMaAkPe3S4Qr6qauaf8LNOe0uD4jmk1a6uUw0k5z5RP9yu3tbSCytkt7SJYYUGFRBgCpaAPP/BGq3OiavN4P1lvngy1i5/ji9/evQK4/wAf+G5NU0+PU9MzHqdgfNidPvOB/B9K0vCHiSPxLoSXPC3MfyXMXeN+4oA3qKKKACiiigAooooAKKKKACiiig
CO4uIrW3ee4cRxRjczN0ArzbS7AfErxBPqmtW4k0S1YxW1vKOHYfx1a8a6jceI9bg8IaO338PfyDoIvT613Omadb6TpsFjaLtigQIv4UAcJfeB9Y8O3jaj4HvSM9bCdv3QHsKvaD8SLS5mFh4ihfSb5flPnjakjf7Jrt6ydd8M6V4ig8vU7VJXAwkmPmQ+oNAGqrK6hkIKkZBHelrzN9M8V+AWaXSJ31jSAd0kUx3SqPRa6jw5450nxEFijkNrenraT8SD8KAOkooooAKKKKACq99fW+m2Mt3eyrFDEu5mY9KsE4GTXk+talL8QPEr6dayFdEsX/fMvSZx1U04pydkJuyuNvdT1X4iXRW2aXTtBjbG4cPN/wDWrd0rRdO0ODytLtUhB+8wHLn1NW4oo4IUhgUJFGNqqOwp1dsYKK0OaUnIM0UUVZAUUUUAV9Q0+z1W1NvqVulxF/dcdK5qJdX+H919r0lpL7Rif31qeWiH+yK62l7EEZBGCPUVMoqS1KjJx2Og0bWbPXtLiv8ATpRJFIO3VT3B96v15ELiX4e+JVvbckaHfPi4i/hhJPUV61BMlxbxzRNuSRQykdwa4pRcXZnUndXH0UUVIwopksscETSzOERBlmY4AFef6l4j1PxjfyaR4QzHZodtzqB4BHoh9aANHxL40kjvBovhiMXuqyfKWXlIPdq5rXfh1dw+HZdZkuXu/EEWJVlY5EeDkhfwrvPDXhax8M2hS1UyXEnM1w/LyH3NbMiLJGyOMqwII9jQBjeENei8R+GLW/hOSV2P/vDg1t15p4XdvCHxDvvD0x8vT70+ZYIe7dWr0ugAooooAKKKKACiiigDH8V65F4e8N3V/M23apVD/tkcfrXnug/D69u9CTxAtw9p4iuMyu4OA/OQD+FX/F0jeLfHun+Goj5mn2582/A/hYcrXpMcaxRJGgwqKFA9hQByHhnxo1zdnRvEcX2LVYvl+bhJvda7GsLxN4UsvEtqFnzDcx8w3MfDofrXOaT4n1DwvqEei+MAfKY7be/H3COwY+tAHoFFIjrIiujBlYZBHcUuRnGaACiikd1jRnchVUZJPYUAVNV1S00bTZb2/lEcMYyST1PoPevL7m61f4izl52k03QQ3yIOJJR7+1O1C9k+IXieSEMV0TTnwfSZgetdUqqiKiKFRRgAV0U6d9WYznbRFTTNKsNGthBplslunfaOtW6KK6TEKKKKBBVXUtLsdYtzBqdslwnbcPu+9WqKBnKWs+r/AA8ufNt3k1DQnb95G3Lw/T2r1DS9UtdY06K9sZRJDIMgjtXNEK6Mkih0cYZT0Irk7O/l+HXiZAWZtB1GTbt/54yHoB7VzVKdtUbQnfRnrlFIrBlDKcgjIIpa5zYKKKKACiiigAooooAbJGksbJIoZGGCD0Irza1d/h540a0kY/2Jqj7o2b/lnKf4R7V6XWP4n8PweJNDmsZgBIRmGQ9Y37EUAbAORkciiuK+H+vzzRTaBrBKalp58v5/vSoP4q7WgAooooAKKKrXeo2dgu68uY4R/ttigCzRXNXXxD8KWblJdctPMH/LNXyxrCvPi/pkGfsWl39/jp9njzmgDu72TyrGaQ5+VCeOtfGvid/h5qOu37yzammoPOwdm+5nNfQn/CwfEWtoU0bw3c2wkBA+1pjH1r5A8VxXMPizUlvkCXHnsXVegOaANabwbbTu0lhrFoIcZUSvhq3fhp4q1H4ceMopYn+12Ex2XCxHK49R71w+ieHtU8RXottItJLh8/MUXIUeprqrzSrbwQUtTeLe6w5H7uE5jTPY+9AHoWq6rFr3jq71S3JMcqfLnqB6U6sjTLSSw1ARzDEk0AlYehNa9efV+Nn5znF/rs7hRRRWR5QUUUUAFFFFABRRRQADrWx8MbgWvxVIJx9oQL9axx1rV+G9sbr4qow5+zqGNbUfjPbyK/11W7M+jqKKK7z9ACiiigAooooA8v8AiH/yVrwF/wBfL16hXl/xD/5K14
C/6+Xr1CgAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA8v8Wf8AJf8Awh/16Tf1r1CvL/Fn/Jf/AAh/16Tf1r1CgAooooAKKKKACiiigAorzH4jeOfEujeMNL8PeFIbRri8haZnuegArD/4Sb4tf3NF/KuHEZjhMNPkrVFF9mbQoVaivCNz2qivFh4m+LZIATRefauo+FXjTWPFkWr2/iKGCO80258ljB91qeHx+FxUnGhUUmuwp0alNXmrHoNFY3iDxXpXhuAtqNyqykZjgB+eT2ArjDd+LPH7FbKNtF0Zzhmk+Wcj1FdpkdB4j+IWl6G5trYNqN/nb9ltvmYH3rAi8N+JvG0i3Hii7OnWH3ora2OGYej11XhzwVpPhtFe3i8+7/iu5eZG+proaAM/R9C03QbXyNKtI7dD97YOWPqa0KKKACiiigArzXWoZPAPjGPXLRCdJ1B9l1En8Mh/jNelVT1bS7fWdLnsLxd0UyFT6jPcUAWYZo7iFJoHDxuMqy9CKfXn3gbVLnQ9Wn8H6y3zw5ayc/xxe/vXoNABRRRQAUUUUAFFFFABXP8AjLxKnhnQ2nTDXcx8u2jP8b9hW5c3EVpbSXFzII4o1LOzdAK868O28vjrxZJ4j1CM/wBm2bGOzhfozD/loKANzwD4afR9Me+1DL6lft50zP1TP8I9q62iigAooooAK5fxH4B0nXy06q1jfE5F3b/K9dRRQB5nHrninwLIlv4gtm1XSx8sdxbjMij1eupbx1oR8OXOsQ3sckNtGXkUNyp9DXQyRpLGySKGRhgg9CK+RPjz4i0iLxBJo/hRvs8a8XawHCO3cGgD1j4YfHa28Ya7c6Tq4jtpjIfsrdA654H1r2Wvzhtbqeyuo7m0laKaJtyOpwQa+vfgn8YIfGOmppGsyLHq1uoUEn/XDtj3oA7P4keIpNB8Lutm2L27PlW/+9WL4U0aPRPD0MKjEs/76Y/7Z61m+PZv7e+JelaPyF00i6Yf3s11jkFzgYHYV1UY6XMKr6DaKKK3MQooooAKKKKACiiigCpq2mwaxpFxYXS7o5Fzj3HSofhVrk89jdaBqLFr3TWwf9zotaQ4IrkLGb/hH/jNG6cDWwIyPpWNaN1c1pvWx69VPVNVstGsXu9RnSGJB1Y9T6Cs7xN4rsfDNoHuCZbmTiG2j5eQ+wrm9L8L6j4qvo9Z8YkiIENBYD7oHbcPWuQ6Cuq6z8R7rdIJNN0BW4H3ZJv/AKxrvtM0yz0ixSz0+BYYUHCqMfjVmONIo1jjUKijCqOgFOoAKKKKAOE+KOkzPpEGu6YhbUtMcPER2Un5q6nw/q0Ot6Da31u4dZIxuI/vY5/Wr08K3FvJDIMrIpUj2IrzrwFM3hvxZqnhO4Oy2RvMsi38eeTigD0miiigAooooAKz9d1SLR9Eur6ZwgijJUnu2OB+daFebfEG4fxD4n0rwhbkmC4bzrp0/g2nIBoAufC7TJW0658RaihXUNVctID2UHiu9qO2gS1to4IlCpGoUAe1SUAFUtW0iz1vT3s9RhWWJx3H3T6j3q7SMwRGY8ADJoA8r1HXbr4QRNJrE732gsT5JJzKp7CvFLn4+6zefE221mORotMik2C1zwUJ6n3rY+NNtr3i3XJvsms215awnEenwP8AOPqPWvENQ0m/0mURalaSWznoJBigD9CdG1a11zR7fUbCQSQXCBlYGuV+KfiGfSPDX2TTyDeXziFR3CngmvHP2b/iT9muj4T1WY+XJzasx4XHavQPFD/258YtOgJ/0fT4iHTsxPeqiruwm7K5teH9Hh0HQrexh52ruZj1JPNaNKetJXecgUUUUCCiiigAooooAKztf0mPW9CuLJ13OVJhP91+xrRpVbawYdqBlX4V6/Jqnh6XTrty93pT/Z5Wbqx55rua8j8NyHw/8aJNJi4h1S3a5cjpur1yuCSs7HWndXCiikZlRSzEADuakYtFY974u0DTiwvdWt
odvXc/SsO7+KvhqIH7Fd/bz2FvzmgDtKK8zf4uzXEpg0zwrqsjnpK0fyUh1v4lauN2jadZ2i/9PYxQB6bUc9xFbRmSeRY0HUscV5t/wi3j7Vhu1rWIbdm+8LU4A+lPg+DkPm+ZfeJNVuieWR5floAo/ELW9D0rULfxRpus2sd9akLNHv5lj7ge9ayfGPQrjTYLuwguLzzV3FIVyV9jWpa/DHwtBzcaZFdnsZxuxXL6fplj8N/HD2ps410fVXzFKV/1cp/hHtQBYHxV1PUcrpPhXUY8cbriPANKbv4rajhrODTLWBv+ev3gK9NGMDHTtiigDzL/AIQLxVq/GteIZbYHr9kfFWbP4P2EJzfa1qWoe1xJmvRKKAOWtfht4TtgrHRbaaUf8tZFyxretNKsLAAWdpFDjpsXGKt0UAFfOmtfAV9c8e6hruv30dlohcu7Zw1fRdch8R/Ax8d+Hjp66hPZMMkeW2A/s3tQB85+Nfibo/h2yk8NfDS1S2iQeXNqAHzyfQ1594CspNe+I2lW85aU3F0PMZuc+pNXfGHwv1zwnr0umFPt7xp5jNbjdtX1Ndd+zbog1P4iyTSpxZxeZkjoc0Adt8TdJ/4R/wCJEEsSbbKa1WNP96scjBxXr/xk8Kvr/hVb61BNzpjeeir1fHavGbO6+2WiTYw+PnX+6fSuKvG0rnxPEGHcayrLZ6fMnooornPmgooooAKKKKACiiigAHr6c12HwN06S78VavrbL+4ZBHGfcda4TUbl7e2CwIZLiY7I0HVs8V9AfDPwqPCXgu2smJaWX99IT1y3OK6sPHXmPquHsO3OVd7bL9TrqKKK6z7EKKKKACiiigDy/wCIf/JWvAX/AF8vXqFeX/EP/krXgL/r5evUKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDy/xZ/wAl/wDCH/XpN/WvUK8v8Wf8l/8ACH/XpN/WvUKACiiigAooooAKKKKAPGviD/yXXw5/14yfzNbR61i/EEZ+O3hwD/nxk/ma3Chz2/OvzDiv/f1/hX6n0WWfwfmCffH1rx3TPiHrXhPxR4n0bw3pcl/eXl5v+QZKjHP869jRDvHTr61x/wAOPDaa5qPjKWCQ22ow6gPIuE6g4JAPtmujhD/ean+H9TPNP4cfUr6H4hs7CX7brXgvxBqeoMdxa4j3CM/7NdePjNIAAPBOugDoBF/9aug8NeLbn7cdD8UKLfVI+FkxhJx6iuxr9HPBPL/+Fzyf9CTr3/fr/wCtR/wueT/oSde/79f/AFq9QooA8v8A+Fzyf9CTr3/fr/61H/C55P8AoSde/wC/X/1q9QooA8v/AOFzyf8AQk69/wB+v/rUf8Lnk/6EnXv+/X/1q9QooA8v/wCFzyf9CTr3/fr/AOtR/wALnk/6EnXv+/X/ANavUKKAPCPGHjyfX0trrTvBeuQanaSB4pvKxkD+En0rY0j48jUbP5fCerz3EPyXIhjyEfuK9frzfWlm8BeLk1m0UnR9RfZdRIPuOf4zQBX/AOFzyf8AQk69/wB+v/rUf8Lnk/6EnXv+/X/1q9NhmjuIEmgcPG4yrL0Ip9AHl/8AwueT/oSde/79f/Wo/wCFzyf9CTr3/fr/AOtXqFFAHl//AAueT/oSde/79f8A1qP+Fzyf9CTr3/fr/wCtXqFc94z8Sr4a0RpYsNezny7WM873PSgDyPxb8XrrxQsei6R4Y1bG/N/GU+by+4FdJp/xYj0vT4bKy8Da6kECBEAi7Cuv8CeHZNI0przUSZNSvm864ZhypP8ACPauqoA8v/4XPJ/0JOvf9+v/AK1H/C55P+hJ17/v1/8AWr1CigDy/wD4XPJ/0JOvf9+v/rUf8Lnk/wChJ17/AL9f/Wr1CigDy/8A4XPJ/wBCTr3/AH6/+tR/wueT/oSde/79f/Wr1CigDy1/jJJJGyHwVrwDDGRFXyd44aGTxfezW9rcWqyyFzFc/fBPrX6B18
a/FHwlrWtfEy/mis3S2Z+blhhFHqaAPJq0NDvNQ07WILzR2dbqFgylOtdDc6BoHh7Kazfi/mYZT7G2Qv1qHSWvfFWt2Oi6LapA8koRZI1+bb6k0Ae2fDXxTceOfiXfalfoElgsVjIH94CvXK840Dw5B4L+LMtlb8CXT0Eh/vPjk16QeDXZS+A5qnxCUUUVqZhRRRQAUUUUAFFFFABXlvxj16fwnr/hfXLKLzbiCRgqf3s16lXn/jrSofEnxB8K6Vc58oOxbHY9qzq/Ay4fEU/D/jQwXh1rXfBuuX2pynerGPckQ6jZXWf8Lnk/6EnXv+/X/wBatfwhr13p2rTeFvET/wClQnNrM3AmTsB7gV3NcR1Hl/8AwueT/oSde/79f/Wo/wCFzyf9CTr3/fr/AOtXqFFAHl//AAueT/oSde/79f8A1qP+Fzyf9CTr3/fr/wCtXqFFAHl//C55P+hJ17/v1/8AWrj/ABj8RLi71TTdds/CGsW0+nPlpJI8AqeoNfQFQXtql7Yz20ihllQqQR6igDzC0+OIvLVJ7fwdrcqMPvpFkE96m/4XPJ/0JOvf9+v/AK1XPhtey6Tf6j4Rv3Jk0+QtDI3/AC0Vjnj6V6JQB5f/AMLnk/6EnXv+/X/1qP8Ahc8n/Qk69/36/wDrV6hRQB5VdfHD7JbPNP4O1uJFGdzxYA+tcn4P+Is0Gral4gvPCOsXU+ovlHjjyqKOwrvviTfzalqGm+EbFysmouGldOqKp6H613ljaR2FhDawqFSJAoAHoKAPNv8Ahc8n/Qk69/36/wDrUf8AC55P+hJ17/v1/wDWr1CigDy//hc8n/Qk69/36/8ArVHP8ZpGt5FPgrXQChHMXtXqlRXWfsk2OuxsflQB8Aa9rNynjC+1Cw8+wkklLbc4ZfrUlv411BQ39opHqLHo1yNxFdRq3w38UeK/iBqC2GmTCJ5z+/dfk/Ou90r9nbSfD2mtqXxE1hIIIxuKxNgE+lAHkGmTwXWpw3ej29wuqJIJAsP3Vwf5V7x8MNXu/EviS/1HVABdw4RsV5p4o8e6TLNH4e8BabHZ2rSCL7Zt/evk4616V8N9Hl8LeIP7MuWYzTIHYt1bIzWlL40RP4T1M9aKU9aSu05QooooAKKKKACiiigAooooA8x8f+LbfwV8T9J1S5tpZv8AR9gMYrsI/GPjXXoUk8P6RHAkgyj3I4IrG1ewtdb+M+maTfQrNE1kZCrDOPet+ex8Q+ALlrnSnk1XRCcyWzcyR/7vtXFU+NnVD4URDR/ibqny6vqFhbR+lrwRSr8I57xxNqvijVDJ3jilwtdp4e8T6b4ls/O0+YeYv+thb78Z9CK2KzLOLs/hX4cgUC8ga/I6m4Od31rbs/B/h6wx9i0e0hx02RgVs0UANSNIkCxqFUdABinUUUAFFFFABWN4p8PQeJdCmsZQBJjdDJ3jfsRWzRQBxXw/8QzXME2hauSmpacfLIf70qD+Ou1rgvH2j3Gn3cPi3RVxd2ePtKjrJF3FdboesW2vaPb6haH5JkDbe6n0NAGhRRRQAUUUUAFYXi7xJF4Z0N7psNcSfJbxd5H9BW1PPFbQPNO4jjQZZmPAFecaLDL4+8YSa5eIRpGnvstIn/icfxigDY8DeFms9Mnv9aQT3+pHzJTKMlVP8H0rY0nwlouh6jNe6TYxWks4w/lrgGtqigBGVXQq4DKRgg96+ePiB4Mm8E66+oWStJo96+5+5ic9c+1fRFVdS0211bT5bK/iWWGVcMrCplFSVmc2Jw0MTSdKpsz5lBVlDIQysMgjvS1ueMfh/qngq6a50qKS/wBHkb7ijLxf/WrnLW9t71SbaQMR95e6n0rgnBwep+eY3L62Dlaauu/QnooxRWZ54UUUYzQAU2SVIImllYKijJJqC6v4LRhG7b5m4SJeSxrtPAnw0vvEtxHq3ieJrbT0OYbRuGc/7XtWkKbmz08DltbGS0Vo9yX4W+B59c1WPxNrMRjtIG/0OFhyx9
T7V7mBgYFRwQRWtukFugjjjG1VUcAVJXfGKirI/Q6FCFCmqdNaIKKKKo2CiiigAooooA8v+If/ACVrwF/18vXqFeX/ABD/AOSteAv+vl69QoAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAPL/Fn/Jf/CH/AF6Tf1r1CvL/ABZ/yX/wh/16Tf1r1CgAooooAKKKKACiiigDifG3wv0rxxqlpqN7d3dpdWiGOOS2facGue/4UHpn/Qx61/3/AK9XoqJU4Sd5RT+RSlJbM8o/4UHpg6eI9a/7/wBdb4F+H+m+ArS7h0ye4nN3J5ssk7bmZq6qiiNOEfhSXohOTe7MPxN4XtPElntlzDcx8w3CcMh+vpWFoPie70bUBoHiw+XKPlt7s/dlHYZ9a7msvX/D9j4i09ra+jBP/LOQfeQ+oqxGpRXA6Pr194T1JNB8VEvbscWmoH7pHZWPrXeqwZQykEEZBHegBaKKKACiiigAooooAKp6tpdvrOlz2F4u6KZCp9R7irlFAHn3gbVLnQ9Xn8Iay3zwZayc/wAUXYfWvQa4/wAf+G5NU0+PU9MBTU9PbzYmT70gH8P0rS8IeJI/E2hJc8Lcx/Jcxj/lm/cUAb1FFFAEV1cw2drJc3MgjiiUs7noAK878NW0vjfxXJ4l1FD9gtGMdlC/3WI/5aCpfGV/P4o8QQeEdHY+XxJfSj7vl91+td3p1hBpenQ2VomyGFAiD2oAs0UUUAFFFFABRRRQAUUUUAFfPf7QnhDxBHBJr2i3sq6cFxc26Nj8a961LUrXSbCS8v5VihjGSSa4a1ttR+Id+LvUla18Pxk+TbHhpz6t7UAfD9fUH7Nnw8NlZP4p1KHEs3yQK45UD+Kl8Yfs5wXvjeyvtDKw6ZLKPtUPdR7V7zp2nwaXp0FlaIEhgQIoHoBQB5r8VbV9N1/Q9dtlwBP5d0/+xxXRrKs8azR8pINy/StjxVoEPibw5daZMdvnJhXHVT61wHgfVZriwl0fUFMd7pzGLY3UoOjV0UZdDGoup09FFFdJgFFFFABRRRQAUUUUAKODk9Bya5TwfF/b3xZ1W+kG+2sEX7O3+13q/wCLtbOh6E7248y9n/dwQjq+eDiui+H3hkeG/DEUcvzXNx++lY9ctzj8K560tLG1Na3Dxv4YfXLFLzTmEWqWR8y2l9/SpfBfihPEWl7J1MOoW37u4gb7ykcZ+hrpK8+8X6VdeHNaTxbocZYKQL+BOsqetcxueg0VS0jVbXWtLhvrGQSRSrnI7HuKu0AFFFFABRRRQB5x8RbWTQ9d0vxhaqfLs38u5RP4wxwCa9BtLmO8s4rmFgySoGBHuKr6zpkWr6Pc2U6hllQgA+uOK474X6lLBbXfhjUmJvtLchie6k8UAd/UV1cJaWc1xIQFiQuSfYZqWuB+KGpzyWtn4b01yt9qbjbj+4DzQBV+Hdu/iDXdS8XXY3pcvstA3/LMDg4r0iqGi6XBo2jW1jaoESJAMD171foAKKKKACgjIwehoooA5vxl4iXwX4YuNTt9OkuigJ8uBe/qa+MPHnxK13x5qLy6nO0dvn5LZDhQPpX3ZetbpYzNeBTAqEyb+mO9fM3ib4Pt8QbzU9e8IQx2dqjHyI8YE2OuKAPM/hB4d/4SX4ladZsMojeax7DbzX0r8RIDofjzQtfUbLZU8iX0JPArj/2b/Al9o2p6rf63ZPbXMDeVGJBgkd8V7P428OR+J/DFxZOP3qjzIT6OORTTs7iaurFA8gN/eAb86Sue8F6xJqWlPZX3Go2B8u4U9uwroa707q5yNWdgooopiCiiigAooooAKVdobLnCjqTSVz3jTV207RTaWuWvL8+RGq9Vz/FSbsrjSvoQfDqH+3/H+r67MCfsDm1iY9wc9K9Y6jBrn/BPh0eGvC9rZOAbjbuncdXY+tdBXA3d3OtKyscV4g8ArNef2t4ZmOnaknzbUOI5T/tCmaB4+Zbv+yvF0H9nag
p2iVhiOY/7NdxWVr/hvTfEdkbfUoFc4wkoHzRn1BpDNUEEAg5B70V5ql14h+Hcyx3wfVdBBwki8yxD1b2rvNJ1mw1yxW70y4SeI9Sp6H0NAF6iiigAooooAKKKKAGyRpLG0cqhkYYKnoRXm2nu/wAPfGr6dOx/sXVJN8Lt/BKf4R7V6XWL4r8Ow+JdClspAFlxugkPWN+xFAG1RXGfD/xFNeW0uiasSmp6cfLYP96VR/HXZ0AFFFc/4y8Sp4a0Rpkw13MfLto/779hQBzvjfU7jX9Zh8H6O3MuHvpB/DF6D3rt9L0230jTILGzXbFCgVfU47mud8B+Gn0jTXv9RzJqd8fNmd/vLn+H6V1tABRRRQAUUUUANkjSWNkkUMjDBUjgivPPFXwb0PXpjdafu0256/uPlVj7ivRaKNyZRjJWkro+db/4W+ONI3GGW2vbfPyhB8341gzWHiu1J87w1eShTg+WnWvqiisnRg+h5dTJ8FUd3C3ofLUGk+LrxlWHw9cw7+hkXp9a6HT/AIR+MtWZW1K7t7S1PVV4evoSihUoLoVSynB03dQv6nC+E/hNoHhn968Z1C56+bc/MVPtXdAAAADAFFFanppKKsgooooGFFFFABRRRQAUUUUAeX/EP/krXgL/AK+Xr1CvL/iH/wAla8Bf9fL16hQAUUUUAFFFFABRRRQAUUUUAFFFFABXFal8UNI0fV4tM1GGaC5mYLGjfxZrta82+M3gzT9e8Lvqsji21DTv3tvcDg5HagDp9b8Y2eh3dla3EEsk17/qlTvW/DJ50KyFSm4Z2nqK8f8Agzqp8Zo+o6+6y6lY4jjjbqgHGcV7HQAUUUUAFFFFABRRWJr/AIx0LwyAus6jDbSsMxxufmf2FAHE+KyD+0B4RAOStpNn2616jXnHhOwuvEvjO48W6jbNBAg2WCv97ae9ej0AFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAUNZ0az13TZLLUIg8bjg91PqPeuM0/VtR8Dagmk+IGafSnbbaXnXYOwY16FVTU9MtdXsJLS+iWWJxjBHQ+o96ALMciTRrJEwdGGQwPBFOrzqG41D4dXwt71pLzQZW+SY8tB9favQLW6gvbZLi1lWWGQZR1PBFAEtFFFABRRRQAUUUUAFea6zDJ4B8Yx63aITpOoP5d1EnRHP8Zr0qqeraXbazpc9heLuimQqfUe4oAswzR3MCTQOJI3GVZehFc/418TDw3o26H5r25PlWqernpXOeENcfwtd3nhnxHMI1s1MlrM/Qw9s+9J4ZtJvGniuXxNqcZ+w2xMdlC4+VsfxigDd8B+GW0LSWub7L6let51w7dVJ/hHtXVUUUAFFFFABRRRQAUUUUAFZ2t65ZaBp7XeoShFHCr3c+gqLxF4jsvDmnm4vG3SNxFAv3pW9BXMaJ4dvvEuoLr3ixcL1trE/djHYketAEWn6PqHjjUU1bxCHg0yNs21l03+7D0r0CONIYljiUIijCqBgAUqqEUKoAAGAB2paACiiigArz/wAd+GLmG7XxN4eQ/bIObiJP+WyelegUU07O6E1c8+0TXLXXrET2rgSrxLEeqN3FaNUfE/gKUXja14TcWl+vzND0jl+o9awrDxzHHP8AY/FNrJpN0Dt3zDCyH2rrhUUtznlBrY6uimwSxXUIltpUkjPRgafg1qZiUUuDSOVijMkjqqL1JI4oAKrajqVppFk11fzLFGo4z3PpWFqnjmxtpDbaNE+rXp48m3GSp96u6B4Hv9cvI9Y8aMHxhobJfuqP9oVnOoomkYNkfg7QrrxPrI8Ua5E0dvE3+gW79R/tEe9em01EWONUjUKqjAA6AU6uNtt3Z0JW0CmyRrLG0cihkYYIPcU6ikM81Qy/DnxWEfJ0DUX+U/w2z98/WvSUdZI1dGDKwyCO4qjrej22u6TNYXqBo5B+R7GuQ8GaxdaNqknhPX3PnwnNpO3SZewH0oA76iiigAooooAK808Zxt4S8aWHiqH5LK
VvL1Ajvnha9LrI8UaHB4i8O3Wn3Kblddyj/aHI/WgDS+0x/YvtWf3Xl+Zn2xmvOvBkTeK/G+o+J7keZZwt5ensf4ezVzreM7//AIQo+FBIT4jWTyTH38vOP5V6v4Z0WDw/4etdPtk2KiAsP9o8n9aANWiiigAooooAKKK4/wAc+JZ7KOPRdF/earffKijnYp4JoAyvEl9ceNPEH/CMaTIyWULA386dsfw59672wsLfTLCKzs4lihiXaqqMCsrwl4ag8M6Mlsn7y4f555m+87H1NbtACBFViyqAT1IHWloooA858b+HbnStTXxR4fj/AHq/8fcCD/WjuxqzpGsWeuWIurCUOOjr3Q+ld4yh1KsAVIwQe4rz3xJ4DurK8bWvBjrbXQ5ktT/q5B349a2p1OXR7Gc4c2qNOiuX03xzatN9j8QQSaTeDjE4wHPqK6eJ0niEsMiujdCCOa6k09jnaa3FopcGjBpiEopJXSGIyTOqIOpJFcvqPjq0WX7L4ehfV7wnaVgGfLPvSbS3Gk3sburavaaJYPd30gUAfIndz6Cqngrw5c6zqh8VeIYyrkYs7dv4E9SPWn+G/At5qF+mteM3Weccw2g+5GPcetehqoRQqgAAYAHauWpU5tFsdEIW1YtFFFYmgUUUUANliSaJopUDo4wysMgiuC1fwPe6PfnWPBE4tphzJZsf3Tjvgetd/RQByXhnx5a6xOdP1OF9O1NODBNxv919q62uf8TeDtO8TQ5nDQXS/cuoeHX8a5iz8Sa14Jul07xbGbnTxxDqEY4Rf9s+tAHo9FQWV7bajaJc2MyTwyDKuhyDU9ABRRRQAUUUUAcD490i50y+h8XaIuLm1wLpR/y0i7j611+i6vb65pEGoWhzHMgbb3U+hq7JGk0bRyqHRhgqRwRXm2nPJ8PvGz6ZMx/sXVH3QO38Ep/hHtQB6Nc3MNpbSXFzII4o13O7dAK878P20vjrxVJ4h1CMjTbN/Ls4X6MR/GKm8ZahceJ9eh8IaO2EOJL6UH5fL7rn1rudN0+30rTYLK0XbDAgRR7CgC1RRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAeX/ABD/AOSteAv+vl69Qry/4h/8la8Bf9fL16hQAUUUUAFFFcF4h+K9h4b1+LSr/TLvzZmCxuB8r/SgDvaKo3Op/Z9FbUPs8jhY/M8pfvYxmsvwh4wh8YWUl3a2U9tCjFQ0w+8R1xQB0VFFFABRRRQAV55eT6j4r8bRabeaTcQaRa/NI8g+SY16HRQB4d420PXfBvxMsfEHgXSZZ7ecBbyCFfkI+lbnii8vb7xf4XdJrqwW/Uie2LYxXqtcp4k8KXOs+JtL1WCdIzp5yqnvnrQBxtro1zD4/v8Aw8NXvXsbtdxLSfMh/wBk1tfDa8u49Z13RJrmS5t9PlCwySnLEH1rVXwper46bXftEfllNuzHOaf4X8KXOha7qmozzpIdQfcwXtQB1dFFFABXO+KvA2heMYVTWrNZXQfJIPvL9DXRUUAec+DtRuPDPiqbwZqE5ngA3ae7/e2DqDXo1eXeKgF/aA8JFBtLWs27HfrXqNABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBDd2kF9avb3cSyxSDDIwyDXASR6h8N78zW6yXvh+ZvnjHLWx9R/s16LTJYo54XimQPG4wysMgigCKxv7bUrOO6sZlmhkGVZTVivO7yw1H4fag9/oyNdaHK2bi1HLRH1X2rt9J1a01rT47ywlEkbjt1B9DQBdooooAKKKKACiiub8beJh4c0b/R/mv7o+Vap6uelAHAfFm3l8Y6imjeHI919ZL588yfxqP+Wea77wFq1nqfhW2SzAje1UQzRDjy3HUVH4F8MnQdJM95l9SvT51y7dQx7D2rntahk8A+MY9bs0J0nUH8u6iTojn+M0AelUUyGaO4gSaBw8bjcrL0Ip9ABRRRQAUUUUAFYfibxRae
G7IPKDNcyfLDbp96Rqg8U+LoNBRbW3X7TqU3ENunJ+p9qoeGfCU320674lf7TqUvKIeVhHbA9aAIvDvhi91LUR4h8XES3jc29t/BAvbj1rt6KKACiiigAooooAKKKKACs3WPD2l69B5eqWcU+BhWZeV+hrSooA8wvPg8lrK114d1e7t5z0ikkzGPwqk3hD4m2vOn6zpzMevmqTxXrlFUpSWzE4pnkY8KfFG741DWNNVV+75SkVZtvhHdamyzeJ9buXkU/6u2fah+tep0UOUnuxKKRi6F4S0fw6g/s6zjSXGDMR87fU1tUUVJQUUUUAFFFFABXMeNvDDa9p63Fiwi1O0+e3l7gjt+NdPRQBzPgrxOPEGlmK6Uw6ja/u7iBvvAjjP4101cD4w0i70LVl8WaDGWeM/wCmQJ1mX1/Cuv0XWLXXdJh1CxkDxSjt2PcUAX6KKKACkZgiFmOFUZJ9KWuQ+JXiCXQ/Ckq2Pz3t0whijHUhuCRQB5Qtwi/tBv4mMIOkf6oT44LdK+hUYOispyGGRXBN4Btv+FXDRgT5qp9o3/xb/vfzrR+HHiCTXfCkQvfkvbYmKWM9RjgE0AdbRRRQAUUVDeXcNjZy3Ny4SOJSzEn0oAzPFHiK38NaJJe3By/3Yk7s56frWH4G8O3CPL4i14b9Vvjuw3/LFewX0rN0K1m8eeJD4g1NT/ZVqxWxgYcP/tEV6OBgYHSgAooooAKKKKACiiigDJ1vwxpPiCIrqdnHK+MLIV+Zfoa4S6+EEmnu1z4Z1m6imY58ueTKD6CvUaKabWwrXPIz4T+KFr/x4axppB6+apNA8J/E+6/4/wDWNNXHTylIr1yiq55dxcsex5dbfB9791uPEmtXcswPMcEmEP4V3ei+GNI8PxBdLsooXxhpAvzN9TWtRUtt7lWsFFFFIAooooAKKKKACiiigAqG7s7e/tmt72FJ4X4ZHGQamooA85vfDGteDbl9R8HSmezJ3TWEhzx6IK6Xwz4z07xKnlxE216g/eWkvDpXQ1yfifwJZ60323T3On6nGdyTw/LvPo3qKAOsorz/AEvxvfaHfLpHjeAwSZ2pfgfupPb6130UqTRLJEwdGGVYHgigB1FFFABXFfFKWy/4RU286hr6Y7bHH3hL2IrsLu6hsbSW5upBHDEpZ3PQAV554XtZvGniqTxRqUZFlbExWULjhsf8tBQBT+CKiz0e5sdYDprySFrgT/6xh6j2r1SuA8eaTc6VqMPi/Rh/pFrgXaD/AJaRDt9a7HRtWt9c0mC/szmOZA2O6+xoAvUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHF/EHw3c6mtjrOljdqGkuZYV/veopPC3xQ0LxFcx6c832XVSdjWkn3tw64qH4ka7dW8mmeHtOkMN1rMhiSRTyoHWtXw94C0Pw+I54bKOW+UZa7kXMhbuc0AdNRRRQAV518ZvDA1jwkdTtUY6hphE0JTrxyRXotMmiSeB4pQGR1KsD3BoA8stfHc3iX4WWT6dKo1a7Ih8peoxw1ejaFpkOkaNb2kCbFVAWH+0ev615F4A8Btpvxe1eeKfOm2LbrdB0y3Wu5+KKTr4Qe5s7qW2mjkQK0bYzk80AdpRXkOtWF94R1vQ9W07VLmZbkKtxBM+5WyByBXrcT+ZCjnqyg0APooooAKKKKACiiigAooooAKKKKACiiigDy/xZ/yX/wAIf9ek39a9Qry/xZ/yX/wh/wBek39a9QoAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigBHRZEZJFDKwwQRwRXAaroWoeENRfWvDIMlox3XNl1GO5UV6BQQCCCMg9RQBmaDr9l4h05buwfPZ4z96M+hrTrhde8NXmh6i3iDwkNso5uLMfdlXvgetdD4b8S2XiSw861OyZOJoG+9E3oaANmiiigCG8u4bCzlurpxHDCpZ2PYCvPvC1pN4z8Uy+KdSjP2O3
JisYnHysB0cCneLr6fxZ4kg8J6S+IFIkvph93b3T613thYwabYQ2dogSGFQqKOwoAsVT1bS7bWdLnsL1d0M6FW9R9KuUUAefeBtUudD1afwhrTfvIMvZuejRdh9a9BrkPH/huTVNPj1PTAV1PT286Ip1kx/CfatHwh4kj8TaElzkC5j/d3MY/gcdRQBvUUUUAFcr4q8XHS5F0zR4jeavPwkSc+V/tN7VD4n8WzRXY0Tw3H9q1WbjcOVgH95queFfCUWgRtc3T/AGrU5+Z7huTnuB7UAQ+FvCA01zqWsOLvVZvmeVufL9lrqqKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAR0WRGRxlWGCD3FebN5nw38VhsH/hH9RfBI+7bN/9evSqo6zpNtrmkzWF6geKVcYPY9jQBcR1kjV42DKwBUjuKdXAeDdWutB1iTwlr7nzE5sp36TL6D6V39AASACScAda8ztwfGfxYkmk+ax0HiIj7sjH+ddN4+8QHw74TubmLDTyfuo07ktxxSfD/wAP/wDCP+E7eGT5ribMsrnqS3ODQB0+BjGOPSvM5c+C/iyjJxY6/wAyE/diYfyzXplct8QvD/8Ab/hSdIvluIMTRuv3ht5wKAOpBBAI5Borm/AfiD/hIvCdtdSYWZR5cidwV45rpKAAkAEk4ArzjW7qfx94kPh/T3ZNJtHBvpl/jI6AH0rS8b+IroSR+HfD536ne/KzjkRJ3z74rd8MeHLXwzosVlbAs+Myyt952PUk0AaVpaQWNpHbWsaxxRqFVVGABU1FFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAFLVdIsdasmtdSt0njPQMM7T6j3rgnsPEHw7uDLpRk1XQycvbt80sf0PpXpVBAIIIyD1FAGR4f8Tab4lsvP02dWZeJIj96M+hFa9cRr/gJje/2t4TnGnakvO1eIpD/tCsS9+KV3puntpOrWTWmvyfuYCwwkr9mHtQBd8W30/i3xJD4U0l8QIRJfTD7pTun1rvbCxg02whs7RAkMKhUUdhWB4H8MDQNIMt0fM1G8PnXMh5+Y9h7V09ADZYkmiaOVQ6MMMpHBFebaZJJ8PfGbaVcMf7F1N91vI38Ep/hHtXpdYnizw7F4l0KWzcBZwN0EneN+xFAG3RXG/D7xFLf2Mmk6qSmpaefKdX+9Io6NXZUAFFFFABRRRQAUUUUAFFFGR60AFFGRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHl/xD/5K34B/wCvl69Qry/4h/8AJWvAX/Xy9eoUAFFFFABXl3jLXPiPb+L/ALB4b06GXS5QFE7Jkrnqc16jRQBgeD9Ak0DQ0gu5BNduS80v94ntWJ8X5ok8ATxyTeU8ksew++a7qq95YWmoRiO9t450ByFkXIzQBymm+GbnVn0u+16eOdLOJTAkY4PA5NdmAAABwBTURY0VI1CqowAOgFOoAKKKKACiiigAooooAKKKKACiiigAooooA8v8Wf8AJf8Awh/16Tf1r1CvL/Fn/Jf/AAh/16Tf1r1CgAooooAKKKKACiiigApkzMkDtGu5gpIHqafRQBwngPxR4l1vWdVtvEOktZwW8pW3k243Cu7oxRQAUUUUAFFFFABRRRQAUUUUAFcV4k8K3Vrf/wDCQeFSIL9OZoB92de/HrXa0UAYHhfxXbeIrYqVNvexcTW78MpqDxx4m/4R7Rttt81/dnybZR/fPQn2qj4x8NpEH8RaXOLG9tFMjsOFkA5Oa5P4c6mPHviVtc8QOq3dqpjtrRhhWX/noBQB3Pgbwz/YGj+ZefPqN2fNupD/AHj2HtXT0UUAFFFFABXmuswSeAfGMet2aE6TqD+XdRJ0jc/xmvSqpavpltrGlXFjfKDBMhVvb3oAtQzx3ECTQOHjcblYdCK4vxD4qutQvzoPhPEt23yzXA5WEd/xrh
9D8S6nc6jc/D7TrsEwSFRe9li/ug+teseHvDll4csBBZplzzJM33nPqTQBB4X8K2vhy0OCZ7yX5prh+WY9+fSt6iigAooooAKKKKACiiigAooooAKKKKACiiigAoPSiigDgdA8VeJ734iahpOp6Q0OlxDMNxt+9+Nd9RjnNFABRRRQAUUUUAFFFFABRRRQAUUUUAcz418L/wDCQaasto3lajanfbTDqCOcfjSeCfE/9u2D2t6ph1Oz+S4gbqMcZ/GunrzL4owP4Ut5fGmkuIZ4EKToP+W2eAT64oAL8nxl8VobNfnsdE+aZf4XY9M16aAFAAGAOAK4L4Q28MngqHVt4mu9QJkml7k56V3tABSEBlKsMgjBFLRQB5npZPg34q3OnyfLZa2d9sv8KEda6/xb4lh8NaM9ww8y4f5IIl6sx6fhWH8V9NE3hJ9UgdYbzT2Escp6gA5IH1rJ8CRy/EC4g8XatzaxL5dlbnsRwWYUAb/gbw1NYxS6zrR83Vr875GPRB2A9K7Cjp0ooAKKKKACiiigAooooAKKKKACiiigAPArgPCvirxRqnjzVNN1bSTb6ZbsRBcbcb67+jAHQUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBBfXsGnWM15duI4YULux7AV5hpvhmP4lavceItdieO2CmGxC8Hb2cVf8U3k/jLxRD4X0pz9jgIkvpx90gdY69Bs7OGwsorS1QJDCoVFHYUAedpfa/8ADuZYNSSXVdEJws68vCPVjXe6Vq9jrVkt3plwlxC3RlNWpYY54WimRZI3GGVhkEVwWq+Cb/Q75tX8Dzi3l6y2b8xuvoo9aAPQKK5Pwz47tNZm/s/UUOn6qnDW03Bb3FdZQB5/460q40fU4PF2jKRLb4F4i/xxf412ekarb61pUF/aNmOZQ2O49jVqWKOeFopkDo4wykcEV5xpMsnw/wDGL6RdMf7I1J99tI3RJD/DQB6VRRRQAUUVT1XVbTRtOlvtQmWGCJcszGgC1JIkUbSSMFRRksTgCvNvFPxp0bR5Ta6NE+r3H3WMH3Yz71594w8eap45umgs3l0/R42+XacPL759KwrW0gslxbRhCerY5b61zzrKOiPnsdndPDt06S5pfgbeo/EnxzrZdY5beytG6Kq4cfjWHcXXiS9INzrlwpHTy3IqcnPWiud1ZvqfOVM5xs38VvQit77xNZLtttbnZf8Apo+a6DTfip410Z0XUBb39onZF+c/jWJQCR0pqtNdR0s6xlN6yv6nsnhP4vaF4kZbe7zpd4x2rBcHBc+1d+CCAQcg9xXypdWNveENIu2UfdlXhl+hrrvA3xJvvClzHpfiF3u9Mc7Y7knLRH/aPpXRCspaM+lwGc0sS1TqLll+DPfaKitrmG8to7i2kWSKQblZTkEVLW57wUUUUAFFFFABRRRQB5f8Q/8AkrXgL/r5evUK8v8AiH/yVrwF/wBfL16hQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAeX+LP+S/+EP8Ar0m/rXqFeX+LP+S/+EP+vSb+teoUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFcx458Tf8I/o2y1+a/uz5Nso7MehPtQBgeK72fxf4mg8LaQ5FtEfMvp1+7gdUrY13wLa3NjbtoeLC+sh/o8kfGcdm9RVjwP4Z/wCEe0bddfPqF2fNupD3c+ldNQByfhbxc9/O2ka7H9k1eDhkbgTD+8vtXWVz3irwnB4hgWaFvs2owfNBcpwQfQ+orO8NeLLhb06F4nT7PqUXCyHhZx6igDsqKKjuLiK1t3nuJFjijG5mY8AUAOd1jQvIwVVGST0FcBqes6h411B9G8Ns0GnocXd92Yd1U+tR3V7qPxCvmstLZ7XQ42xNcdDN7LXc6VpVpo2nx2dhEI4oxjgcn3PvQBxviXwHBa+HYJvDkRi1DTj50bp9+c
jsx710Hg/xJH4m0JLkEC5j/d3Mf9xx1Fb1ea61DJ4B8Yx63ZoTpOoOI7qJOkbH+M0AelUUyCaO5gSaBw8bjcrDoRT6ACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArzPxMx8YfEWz8PxfvdPsctqMfY56ZrufEesQaFoF1f3L7FjQgH/aPT9a5j4X6PPBosus6ohGpak5eVz3XPy0AZ0iP8NvE6SRgjw9fMFYD7ts3b869JjkWWJZI2DI4DKR3Bqpq+lW2taXNY3sYeKVcYI6Hsa4zwfqlz4c1l/CWuyEkEmxnf/lqvpn2oA9ApGYIpZjgAZJPalrhvGuvXV7eR+FvDrZvrrieYdIE759zQBn6nJL8R/Ecmj27MugWZxdSKf8AXt6A0zwmx8HePbzw3OTHZXeG01OwA613Hh3QbXw5o8VhZrwgyzHqzdzXL/E/SJ30+21/S0J1DTHDKR2TPzUAd5RWdoGrwa7odtqFq++OVBz79/1rRoAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACuW8d+JjoGjiGz+bUb0+TbKOzHufaug1C/g0zT5r27cJDChdyfSuE8H2E/inxBN4t1dD5QzHYxMPl2dmx60Ab/gjwyPDuijz/nv7o+bdSHu5610tFFABRRRQBzvibwZpviSPfIpt7xeUuYvlcH3PpXN2XifWfBl0um+MImuLLIEWpRj5VXsG969GqC8srbULVre9hSaJ+qOMigBbS7t761S4tJVlikGVdTkEVk+LfDkXiXQ5LVsLcL81vL/zzfsa5O78Na14IuH1DwjI93Yk7prCQ7ifZPSqXiz432Gh+FDfWtnI9+fla2brC3+1QBseFvHNrZ6HNbeKrlLK704mOQzHBkUfxUeCfixpHjvxHfaZo6MVtF3eaTw49RXx14t8a6t4x1qXUdTl2vKMFI+Bj0rtv2eNaGk/EqOItgXieVj1oA+yndYo2eRgqqMkntXzv4+8Xz+N9feytnZNHsn2lR/y1cdc+or0P4z+KZNC8LJp9pn7Rqj+QGU8oD3rxyztRZWiQA5YD5m/vH1rnrT5VZHz+d454ekqUH70vyJwAqhVGFHAA7UUUVxHwgUUUUAFFFFABTZI0miaKVQyMMEGnUUBsdZ8LvGs/h7WIvDWqymSyumxaSMeUPpn0r3evlHUrZrm0zCxSeI743HVSOa9/wDhl4p/4SvwVbXbgiaL9zID1yvGa76M+ZWZ+gZNjniqPLP4onX0UUVse2FFFFABRRRQB5f8Q/8AkrXgL/r5evUK8v8AiH/yVrwF/wBfL16hQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAeX+LP+S/+EP8Ar0m/rXqFeX+LP+S/+EP+vSb+teoUAFFFFABRRRQAUUUUAFFFFABRTEmjkZljkVivDBTnFPoAKKKKACiiigAooooAKKKKAIL69g06xmu7pwkMKF3Y9gK4DwlZTeLvE03izVEP2aPMVhGw+Up/fx60nim7m8Z+JovC+lORaQMJL6dfukDqleg2dnBYWUVraoI4Yl2oo7CgCaiiigArE8S+F7TxHZbJv3VzHzFOnDKfr6Vt1U1PVLTR7CS7v5ViiQZJPegDi9L8XT+F5W0rxvKsKxD9zfNwjr2H1qErqHxG1DLCSz8Pwtlexuff6VTvPD0vxcJk16BrXQYz/o8WMSSH+/n0rQ0TWbnwVexeHvEQCWPCWN4B8uOyn3oA7qysrfT7RLaziWKJBgKoxU9IrBlDKQQehFLQAVT1bS7bWdLnsL1N0M6lW9RVyigDz7wNqlzoerz+ENafMkHz2ch6NF2H1r0GuQ8f+G5dUsItT0sFdT09vOhKcGTH8J9q0fB/iSLxLoSXIIFzH+7uI+myQdRQBvUUUUAFFFFABRRRQAUUUUAFFF
FABRRRQAUUUUAFFME0bSGNZFLjqobkfhT6ACiiigAooooAKKKKACiiigAooqnq2oQ6VpVxeXEiokSE5b1xwKAOB8cyv4m8Z6X4Wtz5lmG8y/C/wgcjNejwxJBBHDGMJGoVR7CuB+F2nzXUN74o1JCt9qchBDdkB4xXoNABXOeM/DCeI9J/cnyr63O+3mX7ykc4z710dU9W1S10bTJr6+kEcUS5ye57CgDgbf4lG38NS2d5ET4jh/c/Yh94noG+net/wR4Xk0e0fUNUYTate/PPJ6Z6AV50+havfay3xMitQJoz+6sivzGMdWP4V6/oOt2viDR4dQsnDpIOR/dbuKANGo7mBLq1lgkGUlQo30IxUlFAHm/gGdvDPiTUfCN0dkEbl7Et/GDycV6RXnnxNsZdOudO8XWCF7nTXCFAPvKx5JrudNvotS02C7t3DpKgbI+lAFqiiigAooooAKKKKACiiigAooooAKKKYk8UjsiSIzL1UMCRQA+iiigAooooAKKKKACiiigAooooAKKKKACiiuT8eeJm0XS1s7AGTUr4+TAq9UJ/iPtQBh+IrqXxv4sj8N6c5/s61PmXs6/dYj+CvQ7W1hsrSK2tkCRRKFRR2FYfgzwyvhrRBHKQ97OfMupP77nrXQ0AFFFFABRRRQAUUUUAIzBELMcADJNfGfx28Z22v+MpbTSLcW9vbEpIyrt81+5NfZpAYEEZB6iuV8QfDbwt4liZNQ0uEFurxKFY/jQB8DVv+BNQ/srx3pF7u2iG5Via+hPE37Lum3ILeGr5rVuu2YlhXlHiH4F+MvDU5khtReRxnPmxMOMd+tAHpXxJ1Y+IPiPAqNus4rRZEH+1WSTk5rH0m8lv75ZLlSs0MAicH1FbFcFZ3mz89zuo542S7WQUUUVieMFFFFABRRRQAUUUUAA9PXiuu+B2oSWfizV9FZv3GwSRjtnvXIjrWr8OLj7L8Vo1zjz1C/Wt6DtM97Iajji+Xuj6OoooruPvQooooAKKKKAPL/iH/wAla8Bf9fL16hXl/wAQ/wDkrXgL/r5evUKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDy/xZ/yX/wh/wBek39a9Qry/wAWf8l/8If9ek39a9QoAKKKKACiiigAooooAKZNGZYHjB2llIyO1PooA4fwN4BuvCmsape3eqy3ovZC6I7EiMeldxRRQAUUUUAFFFFABRRRQAVy/jrxKdB0fybT59QvD5Nui9QT/F9K6C/vYdNsJry6cJDChdifQVwXhGxn8WeJJvFuqofITMdhGw42f3setAHQ+CfDQ8O6L/pHz39yfNupD3c10lFFABRRWR4i8SWPhvTzcXr5dvlihX70jdgKAJtc1yy0DTnu7+QKBwq93PoK47TdG1HxrqCat4jDQ6ajbray6Z9C1TaJ4bvfEWpL4g8WA5621kfuxr2yPWu7VQqhVAAHAA7UAJHGkUaxxKERRgKowAKp6xo1lrmnvZ6hCJI3HBxyp9Qexq9RQB55p+q6h4D1BNK14vcaU5xbXvXZ6K1egRSxzxLLC4dGGVYHgioNR0211WxktL6JZYpBggjp7iuDin1L4dXwhu2e70CVsJJ1a3+vtQB6NRUVrdQ3ltHcWsiyxSDcrKeCKloAK811mCTwD4yj1uzQnSdQYR3US9ImPVzXpVU9W0u21rS57C9TdDOu1vUUAWYJ47mBJoHDxyAMrDoRT68+8D6pc6Jq8/hDWnzLB89pIejRdh9a9BoAKKKKACiiigAooooAKKKKACiiigAoPIoooA4TQfh9d6T8QL/xDPq008NyMJbFjhK7uiigAooooAKKKKACiiigAooooAK84+I9zJrmsaX4RtWJjvZN10ydYwDkZr0C9uo7KxmuZmCpEhYk+wrgPhrayazqOpeLr1CJL5zHErD7iqeooA9As7aOzsobaIAJEgQAD0GKmoooAR3WONnchVUZJPYV5vI0vxI8UG
GMkeHtPf527XLj0+lW/GWsXWt6onhPw+5E8p/0u4XpAvofrXXaHo1roOkw2FkgWOMc47nuaALqQRx24gVAIwu0L2x6V5xdJJ8OfFn2yIH+wdSfEwA+W3bt+delVU1TTLbV9Nmsr2MSRSrggjp70AWYpUmiSWJgyOAykdwadXnvhTUrnwtrx8J61IxhYk6fO/8Ay0X0zXoVAFbULKPUdOntJgCk0ZQ57ZGM1wnw0vZNKvdS8IXZOdMk/cu3WRSc8V6JXm/xFgbw5rmm+MbVTtt3ENxGv8YY4yaAPSKKitp1ubWKeMgrIgYEe4qWgAooooAKKKKACiiigAooooACMjFcH4X+H15oHjnU9dn1eW5hvGJS3ZjhK7yigAooooAKKKKACiiigAooooAKKKKACiiigCrqeo2+k6bPfXjhIYELMTXDeC9NuPEevT+L9ZQ/PmOwjYcCLs2PWoNenk8eeME0Cyc/2VYsJLuZejOP4D616NBBFa26QQIEjjXaqqOAKAJKKKKACiiigAooooAKKKKAGuxSNmC7iBnA714p4n/aU0fQdRuNPh0u4nuYGKNzgAivYNU1ex0Wye71K4SCFOrMa+RfjdpP9qa03ijQ9Ikg0mb5WuNuBI396gDQ1v8Aab8U3bldHjhtIz/eQE1f+G1l43+LOuC91rVbqDRoT+9KMVEp/uiuA+Fvw0vfiBr6R7Gj0+I5nmxxj0Ffa2haHY+HdGg03TIVighUKAoxn3NAHzbrOmxaL4/vdOgBEaLlc9SKK6P4x6Y+k/EO11rG2G9iEHsWrnSMEiuCsrTPz7PKbhjG+j1EooorE8UKKKKACiiigAooooAB1rZ+F9sLv4qOxGfs6BvpWN0BPoM12/wG0prm+1XxI3KXH7lPbaa3oK87n0GQU3LFOfRI9soooruPuwooooAKKKKAPL/iH/yVrwF/18vXqFeX/EP/AJK14C/6+Xr1CgAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA8v8AFn/Jf/CH/XpN/WvUK8v8Wf8AJf8Awh/16Tf1r1CgAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKK5Xx54mOhaQLezBk1C9PkwIvVSeN30FAGF4lu5vGviqPwzpjn7BbHzL2demR/BXoNpaQ2NpFa2qCOGJdqKOwrC8FeGR4b0ULOQ9/cnzLqX+89dHQAUUVzfinxbFoaLaWifatSn4hgTk/U0AT+JvFNp4btAzgz3UnENsn3pG9KxvDnhe71DUP+Eh8Wfvbx+YLVuVtx6Y9al8MeEp1uzrfiVxdanJyqnlYR6AetdjQAUUUUAFFFFABUV1awXts9vdRrLE4wysMg1LRQB51JBffDe/M9qJLvw7M2ZUPLWx9fpXe2F/banZR3djMs0Eo3KympZYo54WimRZI3GGVhkEV59e6fqHw/wBQfUdFje60aVs3FoOTD6sPagD0SiqWk6vZ61p6XlhKJI3HbqD6GrtAHIeP/DcuqWEWp6WCuqac3nQlOC+P4T7Vo+D/ABJF4m0JLkEC4j/d3Cf3JB1Fb1ea6zBJ4C8ZR63ZoTpOoMI7qJekTd3NAHpVFMgnjuYEmgcPHIoZWHQin0AFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFMnlWCCSaQ4WNSxPsBQBwHxPv5b37B4WsnIuNUkAYr1VQa7fS7CLTNLt7OBAiRRhcD1xya4DwJC3ibxbqfiy4G+1ZvKst38G04OK9KoAK5jxr4o/sHTlt7NfP1O7/d20C9STxn8K19c1m00HSJtQvpAkca8Z/iPYVyXgvRrvV9Sk8Wa/GVnn/49YH/5Yp2I+tAGv4K8LnQNOaa8bztSuvnuJj1JPOPwrpqKKACiiigDnvGPhmPxJpBRD5d5B89vMv3lYc4z71U8DeJpNXtJNO1QeVqtidk8Z4J9DXWVwvjfQ7qyvYvFOgIfttr/AK+NP+
Wyd8+uBQB3VUNb0uHWdFubG4QOssZCg9mxwfzqPw/rlr4h0aHULJwyuMMO6t3FadAHBfC/VJls7zw5qDl73SZCjs3cE8V3teaeMYz4S8dad4mgBjsZ28q+2/xsThc16TG6yxLIhyrAEH60AOooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK5Dx94kfS9OTTdNBk1K/PlRKvVAf4q6PVtUt9G0qe/u2CxQIWPPX2rivBGl3Gu6tN4v1tSZJsrYoRwkX09aAOj8IeG4/DWhpbnD3Unz3Mv99+5reoooAKKKKACiiigAoorG8Q+KtK8NWol1K4VWbiOMcs59MUAbBYKMsQAO5ride+IsMN4dL8M2z6tqWdrJF0i9yayvK8UfEJz9oD6LojHmM8SSj1BrttB8M6X4dtVh023VWAwZWGXb6mgDlNL8A3urXq6r44vPtlwCDHbRnEaD0I711Ou+GdO17wzcaHcQRraTJs2qoAX6VsUUAYfhPwlpfg3RI9M0eERxoPmbux9TW5RRQBxfxR8JL4r8IyLEhe9s8zWuP74rwbTrlrm12zDFxCfLmU9Qw619W14b8U/A8uh37eJNEhLWsh/wBLhQfd9WrGtDmV1ueJnGAeKo80Pij/AFY46io4J47mFZoGDIwyKkrgPz9pp2YUUUUAFFFFABRRUN1dRWcJklPsqjqTTHGLk7R3Ir4zzy2+m2IJu71wkYH619H+CvDUHhTwta6bAu0qu+T3c9a4P4TeBJY2HiXXocXUvNvE4/1Y7GvW676UORan6JlWB+qUPe+J7/5BRRRWp6wUUUUAFFFFAHl/xD/5K14C/wCvl69Qry/4h/8AJWvAX/Xy9eoUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHl/iz/AJL/AOEP+vSb+teoV5f4s/5L/wCEP+vSb+teoUAFFFFABRRRQAUUUUAFFFMnl8mCSUjOxS2B3xQA+iuN8GfEKHxhqeoWcWnz2psnKFpBw/0rsqACiiigAooooArahfQaZp897dOEhgQuxPoK4PwdYz+KvEM3i/VkJiGY9PjYceX/AHsetR+I7mXxx4sj8Nac5/s60YSXs69Nw/gNeiW1tDZ20dvbRiOKNdqqo4AoAloorj/E/i6aK7GieG4vteqTcEryIB/eNAEvirxd/Zsq6Toyfa9YnGI4l58sf3jS+FfB66U7alqsn2vVZ/mklbkIfRfSp/CvhKHQIWuLl/tWpzndPctySfQegro6ACiiigAooooAKKKKACiiigApHRZEKOoZWGCCMg0tFAHn+q6DqHhHUX1rwuC9ox3XVj1yO5Wus0DxBZeItOW6sXyekkZ+8jdwa1CAQQRkHtXDa94YvNF1FvEHhEbZhzc2Y+7MvfA9aAO5qnq2l22taXPYXyb4Z12sKpeG/E1n4l0/z7Y7JkO2aBuGjbuK2aAPPvA2qXWiatP4Q1t8ywfPaSHgNH2X616DXIeP/Dcuq2EWp6WCuqac3mwFeC+P4T7Vo+D/ABJF4m0KO5UgXEf7u4ToVcdRQBvUUUUAFFFFABRRRQAUUUUAFFFBOATQAUVxWh/EaHW/G154dTTp4nteszD5W+ldrQAUUUUAFFFFABRRRQAVwnxR1aZNJg0LTJCupao4SHHoD836V3LuscbOxwqgkmvNfCqN4u+ImoeIZh5unWZEdgx7MOGoA7rw/pMOiaFa2NvGIxGg3Af3scn860JZEhiaSVgqICzE9gKdXn3izU7nxRrY8J6G7BAQb+4T/lkvpmgCtbrJ8R/FP2qQH+wNPfEQP3bhh/hXpKIqIqIAFUYAHYVU0rTLbR9MhsbOMJFEoGAOp7mrlABRRRQAUUUUAFIyhlKsAQRgg96WigDza+il+HXic6lbq39hX7gXCgcQMemB2r0aGaO4gSaFg8bqGVgeoqHUdPt9U0+azvI1kilUqQwz+NcJ4Vv7jwh4gPhPV5
GNtISdPmf+JfTNAHW+KNFh1/w5dWM6b9yFox/tgfL+tYHww1qa98PtpmpPnUtOYxTqeo54/Su2rzPWgfB3xQtdUj/d6dquVvH7B+goA9MopFYMoZTkEZBpaACiiigAooooAKKKKACig8CuK8PfEeHxB41v/D0enTwvZEgzOPlb6UAdrRRRQAUUUUAFFFFABRRRQAUUVxvj/wARy2FkmkaUDLql+fLRV6xqer/hQBj6vLJ8QfF66NZs39kaa++6lXo0g/g969HhhjghSKFAkaDaqqMACsbwl4ci8M6HHaAh7hhuuJR/y0fua3KACiiigAooooAKjuLmG0gaa5kWONBksxwBXNeJfHum6BJ9khBvtSYfu7SHkt+Pauft/C2veNZ0vfGE72lhndHp0Z2sv+8e9AE2o+OtR8QXT6b4EtmnOdragy/u4j9Kv+Hvh3bWV1/aevzHVNTfl3k5jB9lPSuq0/TLPSrVbewt44I1GMIuM/WrVACABVCqAAOAB2paKKACiiigAooooAKZNDHcQtFOiyRuMMrDIIp9FAHifjf4T3WlzS6r4MjMiMd0tj6n2rzyLUUNybS9ja0vF4eGQYx+NfV9cz4o+H/h7xbB5eq2QDZz5sPyP+YrGdKMtTxsdlFDFvmXuy7/AOZ4F16EH6HNGD6V22p/Ae/t5c+F9cFtEOiTgsSPrWNcfCnxvaKCt1FdEnGFWud0J9D52pw/iov3WmjCwfSg4H3mVfqcVvwfCTxtdKC1/Dak9mXpW7pPwEMxz4r1dr0HqIMpTVCXUdPh/EyfvtJHm63sl3d/YtGtnv7w8COMdPxr1bwJ8JvJmi1jxaBPdjDR25+7H9R3rvfDvg/RfC1mlvpNmibf+WjAFz+NbldEKUYH0mByqhhPeWsu7/QRVCqFUAADAA7UtFFanrBRRRQAUUUUAFFFFAHl/wAQ/wDkrXgL/r5evUK8w8Xr/a/xi8KxWh3nTJGknxztBr0+gAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA8v8Wf8l/8If8AXpN/WvUK8v8AFn/Jf/CH/XpN/WvUKACiiigAooooAKKKKACggEYIyDRRQBDDZ21uzNb28UTN94ogBP1qaiigAooooAK5Tx74mbRNIFrYgyajenyYEXqueN30FdFqWoQaXp097duEihQsxPtXCeDdPn8T+IZvF+roTGcpp8bDgR+uPWgDovBfhpfDeiKkxEl9cfvLqX++9dEeBk0EhQSxAA6k1wWteIL7xRqT6B4TcrEp23d8B8qDuo96AJfEPii91LUDoPhIeZcn5Z7ocrD61t+GPC1r4dtDg+deS/NPcNyXb29qn8P+HbLw5p4t7JMseZJW5Zz6k1rUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHF+JPClxb3/wDb/hY+TqEYzLAvCzj0x61qeF/Flt4htzGw+z30XE1u/BU10Fcl4o8Ivd3C6voEgtNVh5DLwJPYigDra821mCTwD4yj1uzQnSdQYR3US9Im7ua6Lwr4tXWN1hqUf2TVYOJIH43Y/iFberaXba1pc9hfJvhnXawoAsQTx3Nuk8Dh45FDKw6EGpK8+8DapdaJq8/hDW3zLB89pKejR9l+teg0AFFFFABRRRQAUUUUAFFUNR1zTdJtzPf3kUSDrlhn8q5m5+Lvgq0UGfWEAJxwjH+lAHXpZ20U5mjt4klbq6oAT+NTVxFt8YfBN2xEOsqSOTmNh/Sui0rxLpGtw+bpt9FKv+9g/kaANWigEEZByKKACiiigAoopGIVSzHAAyTQBxvxM1ybTPDRs9Ob/iY3zCKBB1bnn9K2fCOhxeH/AAzaWMK7SEDyf75GT+tcbpoPjL4qT30nz6fo2BauPuu5613HiLXbXw7os1/eNhUGFXuzHoBQBj+OPE8mkWkenaWPO1a+OyCJeT7mrXgzwxH4a0cJIfMvJz5lxMfvMx5xn2rI8D6DdXV3L4
o8QJm/u+Yo3/5ZJ2x6ZFdzQAUUUUAFFFFABRRRQAUUUUAFc/4x8MxeJdGaH/V3UJ8yCZeGVh0GfeugooA5LwN4ml1S3l0vVv3erWB2ToeN3oR61c8c6AniLwpc2h++g86MjruXkVj+ONAube7h8UeH126hZ/61F/5ap3z68V0nh3XrXxHosV9aMCHGHU9VbuCKAMn4d6++ueFoReHbfW37qeI9VI4Ga6uvM+fBXxXdjxYa9l3c/djcdBXpgORkdDQAUUUUAFFFFABRR061m6p4h0rRoPO1G9ihT/eyfyoA0qhjs7WKZporeJJW+86oAT+Ncdc/GDwTakedrKDPTEbH+lLbfGDwTdZ8nWUOPVGH9KAO2orN0zxBpesQCbT72KVT/tYP5VpdelABRRRQAUUUUAFFFBOBk0AUdZ1a30TSbi/u2AjhQtjPLH0Fcd4E0m41bUZ/F+tKWnusizVv+WcR7Y9aqag8nxC8ZDTYGb+xdMfdcOvR5R29xXpEcaQxrHEoRFGFVRgAUAOooooAKKRmVFLOwUDqScVxGu/ENEvDpfha1bVtRztZY/uxe5PegDq9V1iw0Wye71K4SCJBkknn8q4KbX/EfjqY23hiJtO0w5DX8q/6xfb0q1pXw+utTvl1bxvem/ugQY4EOI0HoR3rvYYIreJYreNIo16Ki4A/CgDnvDXgfSvDce+KM3N253PcT/M2fYnpXSUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFcz4ws/FN5Hbx+E7+GyYn97JKm7ArpqKAOY8JeDYvDzS3t3MbvVLkf6RcHufb0rp6KKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDy/xZ/wAl/wDCH/XpN/WvUK8v8Wf8l/8ACH/XpN/WvUKACiiigAooooAKKKKACiiigAooooAKKK5Px74lbRtKFlYAy6lenyoUXqueN34UAYXiGeXx14uTw5YOf7MsmEl7MvTeP4K9Djjt9PslRAsMEK4A6BQKwPCuhW/g/wAOFr2VftDDzbu4bjc3XNc/cXepfEK/a000vaaHG2JbjoZ/YUASalrGoeONQfRvDjNDpqHbd33r6qK7HRdEstB02Oy06IJGg5Y/ec+pPepdM0u00ewjs7CFYokGAAOT7n1q3QAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBzPirwhHrWy+0+T7Jqtv8ANDOvGT6N61B4W8XPe3DaPryfZNXgGGVuBKP7wrra57xT4Tt/EVuskbfZr+A7oLlOCp9/UUAUfH3huXVLCLU9LUrqmnN5sBXgvj+E+1aPg/xJF4m0GO5U4uI/3dwnQq461leG/FlxFe/2F4oT7NqMfEcjcLOPUGsjWYZPAXjKPW7NCdI1BhHdRr0hP9+gD0qimQTx3Nuk0Dh45FDKw7g0+gAoorD8VeJ7bwvpLXUw8yZvlhhHWRvSgCxr3iHTvDmntd6nOI1H3V6lj6AV5xc+JvF3jN9miw/2JYdDNMMmVfb0p2maJea3errniyTz5m+aC2/hjXsCK6rgKFUBVHRR0FdMKXWRjKp0Rx9n8NtMS4N3qlzdXl033t0hKH8K24PDOh25JXS7Z8/34wa1KK3UUtjJtszZ/DeiXAAbSrVMf3IgKxdQ+HGiXkgnt3ubS4TlDDJtUH3FdZRQ4p7iTaORt9R8ZeCmDyN/bunDgQoPnjHrmvQfDHi7TfFNn5llJtmUfvYG4ZD6Vmg447HqPWub1nw1J9rGr+HJRY6lF83y8I/sRWE6K3iaxqdz1OiuX8F+L4/Elk0N0n2fUrb5biBuv+99DXUVzG4Vy3xC8QPoPhWd7TDXk/7uGPuxPBxXUngZNeZSZ8Z/FhUPzWOg/OG/hkY9qAOm8F6NB4U8GQpMwUlTcTO/UFhk5Nc7pcMvxD8T/wBr3it/Yli5FpGw4lYdSR
3qTxJez+M/EH/CL6S5WxgIN/Mnp2UGu90+wt9MsIbO0jWOKJQqhRigCcAKoCjAHAApaKKACiiigAooooAKKKKACiiigAooooACAwIIyD1BrzbVYJvh94p/tiyVv7Fv3xdxAcRsehA7V6TVe/sYNSsJrO7jEkUqlWBGetAHL+PtFTxV4MaSykBkhxcwyJ1O3nANX/A/iEeJPC9tdsNk4GyWPupHFc14avJvB+vv4U1dy1jOSbCV+691JqHRyfBfxPudLl/48taJmtvSMjtQB6ZRRRQAVj+I/E+m+GNPN1qUwXskY5Zz2AFVfGHiuDwxpm/Hm3cvywQjqxPeuL0rw9c316Nb8Vyfar1uYoT9yIfSrhByZMpKJDca74z8ZuRp6f2DYdCZRlpR6g0lh8ONJt5jc301zeXDct5sm5SfpXXZ4CjhR0A6CkrqjTjE53NszIPDWiQZ26Xavn+/EDRP4a0S4xu0u1TH9yICtOitLIm5yV98ONJnm+02E1zZ3K/dMcmEH4UtvrvjPwY3/EwX+3rDoPKGGiX1JrrKXPGDyD1B6Gs5U4spTaNbw54p03xPZefp0wLrxJEeGQ+mK2a8q1Xw7cWd7/bfhaT7LfxcvEPuTDuMetdp4P8AFlv4o00uF8m8gOy5t26o1cs4OLOiMlI6GiiioKCuL8f+Ip7aGHQtGzJqeoHYFXrGh4LV0eu6zb6Do89/dkbYlJVc8uewFcp4C0a4v7ubxbrYJu7zJtkbrDEe1AHR+FvDsHhrRIrKLDS43TS95G9TWzRUVxcw2kDTXMqxRqMlmOBQBLWJ4i8XaT4ZthJqVyA7nEcS8szemBXLX/jnUvEd2+m+BbZpQDtfUWX5Ij9O9aXh34e2mnXP9pa3KdT1R+ZJZeUB9lPSgDFFv4p+IUhN4H0TRScGD/lpMvqD2rt9C8NaZ4ds1t9Mt1TAwZGGXb6mtUAKAAMAdAKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA8v8AFn/Jf/CH/XpN/WvUK8v8Wf8AJf8Awh/16Tf1r1CgAooooAKKKKACiiigAoqOSeGHHnSpHn+8wFR/b7P/AJ+4P+/goAsUVX+32f8Az9wf9/BUscscq7opFdfVWBFAFfVNRg0nTJ766YLFChY5PXHauA8MRjVNRufHXiRxFAqkWKycBIuecetVvGOsweJPEA0t7jy9F05t97MD95x/B71csdMu/HdzFPfQtY+HbYj7NaL8vn46MfagBVXUPiRfb38y08OxN8q9GuSO/wBK7+zs7ewtI7a0iWKKMYVVFSQwxW8KxQRrHGgwqqMACn0AFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBi+JPDNn4jsvLnHl3CcxTrwyH61ydtqEoWTwh45Tck6+XBdn7so4wM9jXo1eR/H3xhpmh+EHsmkQ6pPxAB9+P39qAOfv8A4wn4Tzt4a1WD+1JImLRSRvwsR+6DU/wg+Mmo+OPHeoWequkVqybrWL0OelfK1xdT3kxluppJpD1eRix/Wul+GmuN4e+IWlX2/bGs4EnPUUAfetxPHa27zzuEjjG5mJ6CvI9Hkl8beIrnX9TU/Y7WUxWkB6ZH8VbPxW11j4c03TLRyDrjiIFTyAcGrul2CaXpNtZRqF8mMK2O59a3oxu7syqSsrFsnJpKKK6jnCiiigAooooAKOlFFAHJ+K7efQruLxXoynz7Zh9phX/lsua9O0bVINa0i3v7ZgyTIGOOxxyK5uSJJ4JIZFDLIpUg+9YHwsvJNL8Qax4TkYmOzPnRbv8AaPNc1aP2jenLodj4219fDnhW6vesmNkajqSeOK4Sye58KeELTRdN/e+INZYyburIG55/CofiD4igu/G0Fu
/7220b55oBz5rN0474Ndb4G8OTxPL4g1sb9SvPu7ukUf8ACB6cVzmxr+EvDUHhnRUtkG6d/nnlblmY9ea3aKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDB8X+GoPEuivA3yXEX7yCVfvKw6c14d4/8AibZW/hhLS/k2+LNKlCqcYJANe1+PNb1XQPCl1faFp7X92inbGv8AD718KeJNW1HXPEF1qOtAi8mcmQFdvP0oA9U0r9oPxfqHiezgnuY0s5pkjZAvIBOOtfWF3fxWGkPfXLARxReYxJ68Zr879Ol8nVLWX+5MjfkRX15411yXWPB/h/RrSQrcakqSMVPVV6imlfQHoL4eWfxXrdx4o1YF4dxXT426IufSuvJycmobW2isrKG2t0CRxoAFHrjmpa7ox5VY5JO7uFFFFUSFFFFABRRRQAoODkVyHiOKfwxrFv4n0dSBuCXcS9Ch6sa66orq0j1CxnspgDHcIUbPoamUeZWKTs7nWadqFvqmnQXtm4eGdAyMO4qySACScAdSa80+EWpyxx6n4eujtGmzlLYHq0fPNanxA1+4RYPDui5k1LUPlKr1jjPBauFqx17mXdl/iL40FohY6FpUmZSOkko6fUV6UqrGgVAFVRgAcAVzVhHo/wAPvCscV3cJFHCuZZD96RvXHU1zcmu+JfHkht/DcTaXpZP/ACEJB/rV9AO1IDf8S+PdP0OT7HaKb/U2/wBXaw8lvx7VhW/hDXPGU63njS5eCxJ3x6bESpQ+5710vhvwTpXhtDJDH592/MlxL8zFvbPSujoArWGnWmmWq29hbxwRqMYRQM/X1qzRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQB5f4s/5L/4Q/69Jv616hXl/iz/AJL/AOEP+vSb+teoUAFFFFABRRRQAUUUUAeI/FmxXXPizoGj3txcJZSWjuyQyFMsCfSqJ+Fvh3P+t1H/AMCmrX+IX/JdfDn/AF4yfzNbR6mvzviXG4nD41QpVHFcq0T9T3cvo050ryV9Tj1+Fnh0uAZdR/8AApqyvCfi8eC9B8WaVbTT3Vyt55NnC7l3UEYz+tejJ98fWvKvCvw51Xxb4+8Q63outJps1le7Arx7w2R1x+Fb8L4zE4jEVI1puSUer8yMxpU6cE4q2p6H4B8A3t5bR33ifcI2PmeQeDI3UMa9ajjSKNUjUIijAVRgCvMf+EN+J/bx5bAf9ev/ANaj/hDfih/0Plv/AOAv/wBavvTxT1CivL/+EN+KH/Q+W/8A4C//AFqP+EN+KH/Q+W//AIC//WoA9Qory/8A4Q34of8AQ+W//gL/APWo/wCEN+KH/Q+W/wD4C/8A1qAPUKK8v/4Q34of9D5b/wDgL/8AWo/4Q34of9D5b/8AgL/9agD1CivL/wDhDfih/wBD5b/+Av8A9aj/AIQ34of9D5b/APgL/wDWoA9Qory//hDfih/0Plv/AOAv/wBaj/hDfih/0Plv/wCAv/1qAPUKK8v/AOEN+KH/AEPlv/4C/wD1qP8AhDfih/0Plv8A+Av/ANagD1CivL/+EN+KH/Q+W/8A4C//AFqP+EN+KH/Q+W//AIC//WoA9Qory/8A4Q34of8AQ+W//gL/APWo/wCEN+KH/Q+W/wD4C/8A1qAPUKK8v/4Q34of9D5b/wDgL/8AWo/4Q34of9D5b/8AgL/9agD1CivL/wDhDfih/wBD5b/+Av8A9aj/AIQ34of9D5b/APgL/wDWoA9Qory//hDfih/0Plv/AOAv/wBaj/hDfih/0Plv/wCAv/1qAPUKK8v/AOEN+KH/AEPlv/4C/wD1qP8AhDfih/0Plv8A+Av/ANagD1CvlL4t694W1fx1dWGt6dNBeR/J9tL/ACqPpX
rv/CG/FD/ofLf/AMBf/rV8s/Ey11Ky8c3kOtXy312p+eZRgNQAt94LtZWUeGdVi1Q/xAfKVrnLrT7vTLwxXETJJGQTjnH41Fbm5D4tDLuP/PLOf0r0nwl8OfHvia0RLeyaOxlPzzzryB+NAHo3gzxMPH3iHTI3bzI9Jt12j0YDrXr7HLE+teR/DPwYPAfxT1TRZJ/OkFkshb6163XZR+E5qnxBRRRWpmFFFFABRRRQAUUUUAKOCK848aeIIvAfjmx1pztjvVaOUj+LjivRq8u+MXhx/FmveFdGhuFtpLiVgsjDODWdX4GaQ+I0/hP4Su9fv5fFPiINJG0zPaK4xuBPGQete3AADAGAK8qtvAvxKs7WK2tvHNtHDEoRFFr0A/Cpf+EN+KH/AEPlv/4C/wD1q4jpPUKK8v8A+EN+KH/Q+W//AIC//Wo/4Q34of8AQ+W//gL/APWoA9Qory//AIQ34of9D5b/APgL/wDWo/4Q34of9D5b/wDgL/8AWoA9Qory/wD4Q34of9D5b/8AgL/9aj/hDfih/wBD5b/+Av8A9agD1CivL/8AhDfih/0Plv8A+Av/ANaj/hDfih/0Plv/AOAv/wBagD1CivL/APhDfih/0Plv/wCAv/1qP+EN+KH/AEPlv/4C/wD1qAPUKK8v/wCEN+KH/Q+W/wD4C/8A1qP+EN+KH/Q+W/8A4C//AFqAPUKK8v8A+EN+KH/Q+W//AIC//Wo/4Q34of8AQ+W//gL/APWoA9QpkzmOCRwMlVJx+FeZf8Ib8UP+h8t//AX/AOtUc/g74ni3kLePLcgKcj7Njt9KAMP/AIaR0y08S3Wka1prwRwuUM+cg/hVfxH4f+GnxdjaXQ9Tt7PVWHyyDCDPoRXzZ4shuoPFN9Ffzi4uFlIeUDG4+tZ9mLwyj7B5/mE8eTnP6UAdL40+HOu+CNQMd/D5sBP7q4i+ZWHrx0r2L4VX7eIbjQri5bzJNOgaMe1YPgbwb8UNatorafdHpU42tLeDeQp9M12Pwz8NJ4S8Ya3o5k8w27ja3rnrWlNe+iJ/CepHqaSiiu05QooooAKKKKACiiigApQcHIpKKAOC1HWIfB/xgivHYJazWDMyZx5kmOKb4f8AE0smo3WpabZvq+v3zHYmOLRD2zWd8RPCw8WfE7RLOe4Mdt5YMka8Fh9a9s0Lw5pfhyxS10q1SJEGAxGWP1NcVT42dUPhRyukfD+e/v11fxpdnULvgxwg4jjHoR3ru4oo4IxHDGsaL0VBgD8KfRWZYUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHl3iohv2gPCIHVbSbP616jXDePfDt7NqVh4m0OLztS0wFVi/voetWvDXxF0rxBfrpeJYNTC5kgeMgKR15oA6+iiigAooooAKKKKAPD/ixqltofxh8PalqQlW0SzkVpEjLAHJ9KrH4o+Fc/wDH1cf+A7V7lcWVrd4+1W0M+OnmRhsfnUP9i6X/ANA2z/78L/hXhZhkWGzCt7aq2na2lv8AI7KGMqUI8sUjxRPij4UDgm6uP/Adq1/gNcJfXPiu+t0kFvcX4aJpEK7hg8816p/Yul/9A2z/AO/C/wCFWILW3tUK20EcKnqI0Cj9KvLslw+XTlOk221bUVfFzrpKSJaKKK9o5AooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArwXXv2e5vFfju51jUdQENpI2fKAyWFe9UUAcT4W+EnhLwntfT9NR5h1klG7J/Gu0jijiQJEioo6KowKdRQB5DrqHTfjebyYbI7y2WJG/vHHSuvYYYisb4v6ZIbLTNdhU/8SqfzZCvUrxWlZXiajp1veRkFZ4w/HvXXRe
ljnqLW5NRRRWxkFFFFABRRRQAUUUUAA61xmqIdT+MHhuOIbv7PcvIB/DmuzLCNGkYgKiliT7Vzfwytn1jxjrfidl3W02IYCexB5xWVZ2jY1prU9UooorjOgKKKKACiiigAooooAKKKKACiiigAooooAKKKKACo50MltIi9WQgflUlFAHz9H+zb/a3iu61XXdQ/0eWUsIFHJH1r1Xwz8M/C3hOMDStMiDY5eUBj9ea6yigBFVUUKihVHQAYAryK6jOnfGiWOX5DfKXj/wBoCvXq8w+LVk+n3uleKYFJeycQttHQMaqDtJMmSujoD1NJTYpUuLeOaJgySIGBH0p1d5yBRRRQAUUUUAFFFFABRRTZriOzt5LqYgRwqXbPoKAOSj/0/wCPenpGN8MFizO3YMO1ev15h8JrGW8vdY8QXS7lubgi0c/88+elen1wTd5NnXFWVgoooqSgooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACoEsrWOczR20KSn+NYwG/Op6KACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAgvrKHULGW0ukDxSrtZT3rybww0/hjWbvwxq7n/WGW1kboVPRRXsFc34y8JQ+JtNHlnyb+3O+2nHVW9/arhLldyZR5lYqEYODSVzeieI5kuv7F8SxfY9Th+UFvuyj+9n3rpSCPp612ppq6OVprcSiiimIKKKKACilAJ6CsHXfE8emSLY6dH9t1Of5YoE5wfUntSbS1Y0rlPxjfz3fk+G9HYm/vmALL/wAsl75r0Xw7osHh/QrbT7ZQojUb8d2xyfzrD8EeDm0VJNT1dhcaxd8yyH+AdlH0rsK4pz5mdMY8qCiiioLCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACqer6bDq+k3FjcqGSZCvI6HHBq5RQB5F4Tnn0O+n8K6uxWS2Y/ZXf/lqufWut6VL438IjxFZLc2LeRqdr80Eo6nHO38a5nQvExurg6VrkX2HVYflaN+A2O4NddKd1ZnPOFndHQ0UpBHWkrYyCiiigAoopQCelACdelcl4uurnV7238LaKd9zcsDcleixdwTV7W/E32W4XS9FT7Zqs/CKnIj9ya6XwT4QXw9ayXV63n6ndnfPMeSD6D2rGpUsrI1hC+rNzRtKttE0e206yXbDboEUVeoorkOgKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAMLxP4S03xRZ+Xex7Zk5imThlPbmvPp7fxh4IbbdRnXdNHWccNCv07169SMqupVgGB6gjOaqMnHYTSe55VpnxB8P6o/lJNLDMOCssZUD8TXQrd2bqCt7bHPbzRW1q/g3w/rsXl6lpkUi/9M2aI/mhBrnI/gj4AilEkejThwc5/tO7/wDjtbKu+qMnS7Fo3NoPvXtsPrKKw9V8c6Bo4P2q5d26AQrvyfwrUm+CngK4k3zaNOzf9hO6H/tWtrRvAPhnw/8A8gvS0TjH72V5v/Q2NDr9kCpd2cBDf+KfGjCHw/Ztplg3/MQcckfSu68J+B7HwyhnY/atQkH725k5JP
t6V0scaRIEiRUUdAowKdWMpuW5oopbBRRRUlBRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFc74q8G6d4otMTr5N0nMVwnDKf610VFAHkE7+LfBLeVqtudZ05Ot6o5QfSrml+PvD+rHZBcSRyDhllQqAfxr1J0WRSrqGU9QRkGsHWfA/hzX02anpcbj/pk7wn80INbRqyW5m6aZlLd2bgFb23OewlFDXVmoJa9thjn/Wiq0PwT8AwSiSLRp1cd/wC07o/+1aJfgl4Bmk8yTRp2b1/tO6/+O1ft/In2XmZOqePvD+kDFxcSSOThREhbJ/CqcEni7xo4i0q2Ojae3/L43Vx9K9A0fwP4c0IY0zS404x+9d5T/wCPk1vIiooVFCqOgAwKiVWT2KVNI53wr4L07wvbEwr513JzNcPyzH29K6OiisTQKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAP/Z) ###Code from typing import Sequence class Initializer: def init_weights(self, n_in, n_out) -> Sequence[Sequence[Var]]: raise NotImplementedError def init_bias(self, n_out) -> Sequence[Var]: raise NotImplementedError class NormalInitializer(Initializer): def __init__(self, mean=0, std=0.1): self.mean = mean self.std = std def init_weights(self, n_in, n_out): return [[Var(random.gauss(self.mean, self.std)) for _ in range(n_out)] for _ in range(n_in)] def init_bias(self, n_out): return [Var(0.0) for _ in range(n_out)] ###Output _____no_output_____ ###Markdown Exercise e) Dense layerComplete the DenseLayer class below. The dense layer takes an input vector and computes an output vector corresponding to the value of each artificial neuron in the dense layer. 
###Code
class DenseLayer:
    def __init__(self, n_in: int, n_out: int, act_fn, initializer: Initializer = NormalInitializer()):
        """
        A fully-connected (dense) layer of n_out artificial neurons.

        n_in: the number of inputs to the layer
        n_out: the number of output neurons in the layer
        act_fn: the non-linear activation function for each neuron
        initializer: The initializer to use to initialize the weights and biases
        """
        self.weights = initializer.init_weights(n_in, n_out)
        self.bias = initializer.init_bias(n_out)
        self.act_fn = act_fn

    def __repr__(self):
        return 'Weights: ' + repr(self.weights) + ' Biases: ' + repr(self.bias)

    def parameters(self) -> Sequence[Var]:
        """Returns all the vars of the layer (weights + biases) as a single flat list"""
        # Flatten the n_in x n_out weight matrix, then append the bias vector.
        params = [w for row in self.weights for w in row]
        params.extend(self.bias)
        return params

    def forward(self, inputs: Sequence[Var]) -> Sequence[Var]:
        """
        inputs: A n_in length vector of Var's corresponding to the previous layer
        outputs or the data if it's the first layer.

        Computes the forward pass of the dense layer:
        For each output neuron j, it computes: act_fn(sum_i weights[i][j]*inputs[i] + bias[j])

        Returns a vector of Vars that is n_out long.
        """
        assert len(self.weights) == len(inputs), "weights and inputs must match in first dimension"
        outputs = []
        for j in range(len(self.bias)):
            # Weighted sum (dot product) of the inputs feeding output neuron j.
            pre_activation = Var(0.0)
            for i in range(len(inputs)):
                pre_activation += self.weights[i][j] * inputs[i]
            outputs.append(self.act_fn(pre_activation + self.bias[j]))
        return outputs
###Output _____no_output_____
###Markdown Verify that your class is correct by running the code below, and verifying that `actual` is the same as `expected`. Here we define a small 3x2 dense layer with some fixed parameters and use numpy to compute the expected values.
###Code import numpy as np np.random.seed(0) w = np.random.randn(3, 2) print(w) b = np.random.randn(2) print(b) x = np.random.randn(3) print(x) expected = np.tanh(x@w+b) class FixedInit(Initializer): """ An initializer used for debugging that will return the w and b variables defined above regardless of the input and output size. """ def init_weights(self, n_in, n_out): return [list(map(Var, r.tolist())) for r in w] def init_bias(self, n_out): return list(map(Var, b.tolist())) layer = DenseLayer(3, 2, lambda x: x.tanh(), FixedInit()) var_x = list(map(Var, x.tolist())) actual = layer.forward(var_x) print(actual) print(expected) ###Output [[ 1.76405235 0.40015721] [ 0.97873798 2.2408932 ] [ 1.86755799 -0.97727788]] [ 0.95008842 -0.15135721] [-0.10321885 0.4105985 0.14404357] [Var(v=0.8935, grad=0.0000), Var(v=0.5275, grad=0.0000)] [0.89347265 0.52750061] ###Markdown Exercise f) MLPWe'll now combine multiple DenseLayers into a neural network. We'll define a class to help us with this. We name it Multi-Layer Perceptron (MLP), since in the "old days", a single dense layer neural network was called a perceptron. It takes a list of DenseLayer as input and defines a forward function. The forward function takes a vector of inputs, the data inputs, and return a vector of outputs, the output of the neural network, after being passed through each layer of the network. It also has a parameters function which just returns all the parameters of the layers as a single flat list.Complete the MLP class below. 
###Code
class MLP:
    def __init__(self, layers: Sequence[DenseLayer]):
        self.layers = layers

    def parameters(self) -> Sequence[Var]:
        """ Returns all the parameters of the layers as a flat list"""
        all_params = []
        for layer in self.layers:
            all_params.extend(layer.parameters())
        return all_params

    def forward(self, x: Sequence[Var]) -> Sequence[Var]:
        """ Computes the forward pass of the MLP: x = layer(x) for each layer in layers """
        # Feed the output of each layer into the next layer in sequence.
        # (An unused local `inp = x` from the original implementation was removed.)
        for layer in self.layers:
            x = layer.forward(x)
        return x
###Output _____no_output_____
###Markdown Exercise g) SGDNow we need code that will perform the stochastic gradient descent. Complete the class below
###Code
class SGD:
    def __init__(self, parameters: Sequence[Var], learning_rate: float):
        self.parameters = parameters
        self.learning_rate = learning_rate

    def zero_grad(self):
        """ Set the gradient to zero for all parameters """
        # Use a float to stay consistent with Var.__init__, which sets grad = 0.0.
        for parameter in self.parameters:
            parameter.grad = 0.0

    def step(self):
        """Performs a single step of SGD for each parameter: p = p - learning_rate * grad_p """
        for parameter in self.parameters:
            parameter.v -= self.learning_rate * parameter.grad
###Output _____no_output_____
###Markdown Loss functionsWe are only missing a loss function now. We're doing regression so we'll use the L2 loss function $L2(t, y) = (t-y)^2$, where $t$ is the expected output (the target) and $y$ is the output of the neural network.
###Code
def squared_loss(t: Var, y: Var) -> Var:
    """Returns the squared error (t - y)**2 as a Var, so it can be backpropagated."""
    return (t-y)**2
###Output _____no_output_____
###Markdown Backward passNow the magic happens! We get the calculation of the gradients for free. Let's see how it works.
###Code mlp = MLP([ DenseLayer(1, 5, lambda x: x.tanh()), DenseLayer(5, 1, lambda x: x) ]) x, t = sample_data() x = Var(x) t = Var(t) y = mlp.forward([x]) loss = squared_loss(t, y[0]) loss.backward() ###Output _____no_output_____ ###Markdown and the gradients will be calculated: ###Code for i,layer in enumerate(mlp.layers): print("layer", i, layer) ###Output layer 0 Weights: [[Var(v=-0.0014, grad=-0.9652), Var(v=-0.0525, grad=-0.8994), Var(v=-0.1696, grad=-0.4621), Var(v=0.0303, grad=0.1691), Var(v=-0.0462, grad=0.0690)]] Biases: [Var(v=0.0000, grad=0.7676), Var(v=0.0000, grad=0.7153), Var(v=0.0000, grad=0.3675), Var(v=0.0000, grad=-0.1345), Var(v=0.0000, grad=-0.0549)] layer 1 Weights: [[Var(v=0.1857, grad=0.0074)], [Var(v=0.1738, grad=0.2725)], [Var(v=0.0930, grad=0.8687)], [Var(v=-0.0326, grad=-0.1576)], [Var(v=-0.0133, grad=0.2401)]] Biases: [Var(v=0.0000, grad=4.1341)] ###Markdown Exercise h) Putting it all togetherWe are ready to train some neural networks!We'll train the neural network for 100 gradient updates. Each gradient will be calculated on the average loss over a minibatch of samples. Read and understand the code below. Answer the inline comment questions. We'll plot the loss for each batch, which should decrease steadily. ###Code mlp = MLP([ DenseLayer(1, 16, lambda x: x.tanh()), DenseLayer(16, 1, lambda x: x) ]) # What does this line do? learning_rate = 0.01 #Try different learning rates optim = SGD(mlp.parameters(), learning_rate) # What does this line do? batch_size = 64 losses = [] for i in tqdm.tqdm(range(100)): loss = Var(0.0) for _ in range(batch_size): # What does this loop do? x, y_target = random.choice(train_data) # What does this line do? x = Var(x) y_target = Var(y_target) y = mlp.forward([x]) loss += squared_loss(y_target, y[0]) loss = loss / Var(batch_size) # What does this line do? losses.append(loss.v) optim.zero_grad() # Why do we need to call zero_grad here? loss.backward() # What does this line do? 
optim.step()# What does this line do? plt.plot(losses, '.') plt.ylabel('L2 loss') plt.xlabel('Batches') plt.show() ###Output 100%|โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 100/100 [00:02<00:00, 45.62it/s] ###Markdown The plot should look similar to: ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAXgAAAEGCAYAAABvtY4XAAAcW0lEQVR4Ae2de6xlVX3Hv0ihyluRh1XOMGPoNNpYCiZoFWpNRaskptY2bUKtDXb6skGpyQwwMRiTWkraSJ1qmyAKFaUtJZhcxqAVakkp0strALUMLQOSQNXS1hr7hzSn+V3W7/KbfffeZ++zz7n79dnJzV577fX4rc9v7e9Zd+1z1pI4IAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEOETj++OOnZ555Jn8woA/QB+gDFfuApG93SMaLTTFx54AABCAAgeoEJK0Wq2qH7iDw1Z1KSghAAAJGAIGnH0AAAhAYKAEEfqCOpVkQgAAEEHj6AAQgAIGBEkDgB+pYmgUBCEAAgacPQAACEBgogcEL/OqBp6d7bt0/tTMHBCAAgTERGLTAm6hv3713unXXytoZkR9T16atEIDAoAXeRu4m7lt2rky37VpZG8njcghAAAJjITBogfcRvIm7jeQZwY+lW9NOCEDACAxa4K2BJurMwdPZIQCBMRIYvMCP0am0GQIQgIARQODpBxCAAAQGSgCBH6hjaRYEIAABBJ4+AAEIQGCgBBD4gTqWZkEAAhBoS+C3S7ov/H1X0vvKlp9nPXg6KwQgAIF6BNoS+Kjlh0p6StKWGJkNI/D1HEtqCEAAAl0Q+HMl/WNW0LPXCDydFQIQgEA9Al0Q+KslvTcr6Ol6RzJwdTKZ1GsZqSEAAQiMnEDbAn+4pO9IOqlA4NejGcGPvKfSfAhAoDaBtgX+7ZK+uK7iJQEEvrZvyQABCIycQNsCf72kXy/R9fVbCPzIeyrNhwAEahNoU+CPlPQfko5dV/GSAAJf27dkgAAERk6gTYEvkfONtxD4kfdUmg8BCNQmgMDXRkYGCEAAAv0ggMD3w09YCQEIQKA2AQS+NjIyQAACEOgHAQS+H37CSghAAAK1CSDwtZGRAQIQgEA/CCDw/fATVkIAAhCoTQCBr42MDBCAAAT6QQCB74efsBICEIBAbQIIfG1kZIAABCDQDwIIfD/8hJUQgAAEahMYlcCvHnh6uufW/VM7c0AAAhAYOoHRCLyJ+vbde6dbd62snRH5oXdt2gcBCIxG4G3kbuK+ZefKdNuulbWRPO6HAAQgMGQCoxF4H8GbuNtInhH8kLs1bYMABIzAaATeGmuizhw8HR8CEBgLgVEJ/FicSjshAAEIGAEEnn4AAQhAYKAEEPiBOpZmQQACEGhT4I+TdIOkb0j6uqTXbtyo77kYtuyjs0IAAhCoR6BNgb9G0nuShB8uyQS/8EDg6zmW1BCAAATaEvhjJT0q6ZBCRc/cQODprBCAAATqEWhL4E+XdJekT0u6V9JVko7MaLpd7kgGrk4mk3otIzUEIACBkRNoS+BfLekZSWclUb9S0odzBH49ihH8yHsqzYcABGoTaEvgT5Z0YF29pbMl3RyuNwQR+Nq+JQMEIDByAm0JvAn47ZK2JyW/TNIVG1Q9RCDwI++pNB8CEKhNoE2Bt3n4VUn7JN0k6YVBzzcEEfjaviUDBCAwcgJtCvwGES+LQOBH3lNpPgQgUJsAAl8bGRkgAAEI9IPAaAWelSX70UGxEgIQmJ/AKAXe14Znd6f5Ow45IQCB7hMYpcCzu1P3OyYWQgACzQmMUuB9BM/uTs07ECVAAALdJTBKgTd3MAff3
U6JZRCAwGIIjFbgF4OPUiAAAQh0lwAC313fYBkEIACBRgQQ+Eb4yAwBCECguwQQ+O76BssgAAEINCKAwDfCR2YIQAAC3SWAwHfXN1gGAQhAoBEBBL4RPjJDAAIQ6C4BBL67vsEyCEAAAo0IIPCN8JEZAhCAQHcJIPDd9Q2WQQACEGhEAIFvhI/MEIAABLpLAIHvrm+wDAIQgEAjAm0K/AFJD0i6r4oRbNnXyM9khgAERkigiraWbZXa5J4J/IurFoDAj7B30mQIQKARAQS+ET4yQwACEOgugTYF/lFJ90i6W9KOgpG8xa/a32Qy6S5FLIMABCDQQQJtCvxLk6ifKOl+SecUiPxaNFM0Hew9mAQBCHSaQJsCH/X8MkkfiBHZMALf6X6EcRCAQAcJtCXwR0o6Oom4he+Q9JasqMdrBL6DvQeTIACBThNoS+C3pWkZm5p5SNKlUczzwgh8p/sRxkEAAh0k0JbA52l4aRwC38Heg0kQgECnCSDwnXYPxkEAAhCYnwACPz87ckIAAhDoNAEEvtPuwTgIQAAC8xNA4OdnR04IQAACnSaAwE+n09UDT0/33Lp/7dxpb2EcBCAAgRoERi/wJu7bd++dbt21sna2aw4IQAACQyAweoG3kbuJ+5adK9Ntu1bWRvJDcCxtgAAEIDB6gfcRvIm7jeQZwfNQQAACQyEweoE3R5qoMwc/lC5NOyAAASeAwDsJzhCAAAQGRgCBzziU0XwGCJcQgEBvCSDwwXU+H883agIUghCAQG8JIPDBdXyjJsAgCAEI9J5AU4G/UNIxkg6R9Mm0Bd+5pctCznlzM1aT9BE836jpfb+mARCAwHQ6bSrwtp67HW+WdKOkVyaRT9GLO22GwFuPYA6e5wICEBgKgaYCvy9J+JWSfj6F712crD9X0mYJ/FAcSzsgAAEINBX4T0n6oqT9ko5I2/Dd/ZwsLy6EwNNZIQABCNQj0FTgnyfpDEnHJSl/kaRXLU7WnysJga/nWFJDAAIQaCrwr5Nkm2bbcb6kP5G0JV1XOR0qyaZ0VmYlRuDprBCAAATqEWgq8DYHb9+g+Ykk1L8r6SuzxDrcv0jSZxH4ek4jNQQgAIEqBJoK/D1JrD8o6YIU9rig47nBl0n6sqQ3IvBVXEUaCEAAAvUINBV4G61fnF6ynizJ5uQfyJXzjZE3SDpT0hsQ+HpOIzUEIACBKgSaCryJuk2znJ30eyLpXRu1fEPMeZI+nmLLBH5HMnB1MplUaQ9pIAABCEAgEWgq8KbRJ0kywba/EzdIeX7ERyQ9IemApKckfV/SZ/KTPhvLS1b6LAQgAIF6BJoK/C9JekzSNZKulfSopHeWCXXOvbIR/HpyBL6eY0kNAQhAoKnA21IFcdR+giRfvmBdnGcEEHj6IQQgAIElEGgq8NkXqnVess7Q/YNvM4JfgvcpEgIQGDSBpgJ/haRbJL07/X1B0uUHS/NirhD4QfdDGgcBCCyBQFOBN/X+hfQLVvsVqy84thhVD6Ug8EvwPkVCAAKDJrAIgQ8yvLwgAj/ofkjjIACBJRCYV+D/R9J3c/48fuFKj8AvwfsUCQEIDJrAvAK/cAGfVSACP+h+SOMgAIElEEDgS6Cyu1MJHG5BAAKdJ4DAF7jI92fdumtlun333rWt/AqSEg0BCECgkwQQ+AK37Ll1/9TEfcvOlaltwm3XHBCAAAT6RACBL/CWj+BN3BnBF0AiGgIQ6DSBeQX+FEnXS7pd0iWSDgsvSW8K4YUF23jJyhx8p/suxkEAAjMIzCvwX5L0W5JOl/QxSXdIOj6puW3Bt/CjDYGP7BD7SIMwBCDQBwLzCvx9GQW3/VgfkvRySVV3dMoUUX7ZpsD7dA0vXPvQpbERAhBwAvMKvIn58zOS/LOSHpH0ZCZ+IZdtCjwvXL27cIYABPpEYF6Bf7+kn85R7p+UZNM3Cz/aFHgfwfPCtU9dG1shAIF5Bb5MwN9XdnPee20KvHUT5uB5WCAAgb4RWIbAPz6vi
Jfla1vg++ZY7IUABCCwDIH/ZplQz3sPgaezQgACEKhHYBkCzwi+ng9IDQEIQGApBOYVeF8WOLtksMU/M+8ovSwfI/il+J9CIQCBAROYV+DLtLjKPfuK5V1pg277yuWHZmVC4AfcC2kaBCCwFAJtCfwhko5Kom7LHHxV0mvKRB6BX4r/KRQCEBgwgbYEPmr5EenXr2fFyGwYgR9wL6RpEIDAUgi0KfCHSrIlD74n6fKsoKfrHcnA1clkshQAFAoBCEBgqATaFHjX9OMk3Sbpxz0i78wIfqhdkHZBAALLItAFgTc9/6CkD+QJu8ch8MvqApQLAQgMlUBbAn+CJBu52/GCtK78eek699QlgWfZgqE+DrQLAsMi0JbAv0qSrRu/T9KDaQSfK+we2RWB94XHWDp4WA8CrYHAEAm0JfCu25XPXRF4lg4e4mNAmyAwTAIIfE2/+gjelw6+7s7H1jbktngOCEAAAl0igMDP4Q2fgzdxtw25ma6ZAyJZIACBpRNA4BsgZrqmATyyQgACSyeAwDdAnJ2uYZqmAUyyQgACCyeAwDdE6tM1iHtDkGSHAAQWTgCBXzhSCoQABCDQDQIIfDf8gBUQgAAEFk4AgV84UgqEAAQg0A0CCHw3/IAVEIAABBZOAIFfOFIKhAAEINANAgh8N/yAFRCAAAQWTgCBXzhSCoQABCDQDQII/AL9wHfiFwiToiAAgcYEEPjGCJ8twMSddWkWBJNiIACBhRBA4BeCcbq2oqQtOrZl58rUVpq0dWo4IAABCLRJAIFfEH0fwfsywixdsCCwFAMBCMxNAIGfG93GjNk5+Oz1xhzEQAACEFgeAQR+SWx9RM9a8UsCTLEQgMBMAm0J/CmSbpP0NUkPSbpw1t59XdmybybRlIC14quSIh0EILAsAm0J/EsknZFE/WhJD0t6RZnI903gfQTPnPyyui7lQgACswi0JfBZLf+8pDdlI+N13wTewDMHP6v7cR8CEFgmgS4I/KmSHpd0TBT0FN6RDFydTCbL5EDZEIAABAZHoG2BP0rS3ZLekSPuB0X1cQQ/uN5CgyAAgV4RaFPgD5N0i6SLDlLyggsEvlf9CmMhAIEOEGhL4A+RdK2kjxbo+YZoBL4DvQUTIACBXhFoS+BfL2kqaZ+k+9LfWzeoeohA4HvVrzAWAhDoAIG2BD5Id7UgAt+B3oIJEIBArwgg8L1yF8ZCAAIQqE4Aga/OipQQgAAEekUAge+VuzAWAhCAQHUCCHx1VqSEAAQg0CsCCHyv3IWxEIAABKoTQOCrsyIlBCAAgV4RQOB75S6MhQAEIFCdAAJfnRUpIQABCPSKAALfK3dhLAQgAIHqBBD46qwapYxrw8dwo0LJDAEIQKCEAAJfAmdRt3x3J9uf9bRLbp6eduneKXu1Loou5UAAAkUEEPgiMguMj/uznrpzZWp/W3auTG07P7vHAQEIQGAZBBD4ZVDNlOkjeBN0H8GzV2sGEpcQgMDCCSDwC0eaX2Ccd4/h/NTEQgACEGhOAIFvzrBRCYh9I3xkhgAESggg8CVwln3Lp2544bps0pQPgXESQOBb9Ht8+coL1xYdQdUQGCgBBL5Fx/oInheuLTqBqiEwYAJtCfzVkr4l6cFqG/ZJQ92yjzn4AT9dNA0CLRNoS+DPkXQGAt+y96keAhAYNIG2BN4G7qci8IPuWzQOAhBomUDXBX5HMnB1Mpm0jIrqIQABCPSLQNcFfn2Kfqhz8P3qLlgLAQj0iQAC3ydvYSsEIACBGgQQ+Bqwlp2Ub9QsmzDlQ2BcBNoS+M9JelLSDyQ9IemC9bmYgsDQp2j8O/H8qnVcDyCthcAyCbQl8AUyXhw9dIHnV63L7OaUDYFxEkDgO+J3H8Hn/aqVqZuOOAkzINAzAgh8hxyWJ+Qu/EzddMhRmAKBnhBA4DvuKKZuOu4gzINAhwkg8B11jo/mr7vzsen23XvXtvezs8VzQAACEKhCAIGvQmmT02SnZUzkbSSPuG+yI6gOA
j0ngMB30IFMy3TQKZgEgR4SQOA76DQfwed9o6aD5mISBCDQUQIIfEcdYyKfNy1TN76jzcMsCEBgEwgg8JsAeVFV+MjevzLpc/P+ItbjLR0HBCAAAQS+R30gzs1v3bkyffnFN09N1P28ZefK2rdtLB0HBCAAAQS+R33AR/A2Nx9F3cU+b86+aEon2+yq6bL5uIYABLpLAIHvrm9yLXMh9mkZF3WfrrH7fljYvjvvUzd5aSxtNl0sw8viDAEI9I8AAt8/n61bbEJc9iL2khv3rYm7Td34KN/F3vJ6/pjOPjDiFI+nsXOVo276KmVm02xGHdk6uYZAHwkg8H30WonNJn4+aj/tkpunp1367K9g45SOifjFN+7LTWd5faTv/yX4h4LHF4l9rDv7q9soylXCRU2sWkdR/qL4aFNMUxQf02TDdfMUpS+Kz9Y363pR5cyqJ+9+Ud1F8XlldDmu6+1A4Lvce+awLb6IdSG3OBdrn9LJjtpN8GO67Mvb7H8AUey9k1cp0z90rPyicPbDwTAU1eH/bdh9/2DLy+8ovRw7e9jZZD/IsvGW3g/P63F+nc2Tx8nSzkqfLcfr8fr97OXEMmNaC5dxifm9zOw5ponhbLrsdVHdRfHZ/FWvi2yqEl8lTZEd2XZEX5flsT5reeMR7YjhmGaeMAI/D7UO5/FO50IeO1LsOEXprPOZ0MVpHSsr/gcQxT5PpC19jI95T925MrU/K78obPn9A8fsdFvjh4K3zx+qog8Xz5/98CqyL7Yt2h1tyopvvI55YlmxvhguSh/jY91V2hM/4KI/rRwXlywPy+Msq9aRl97y+pGt231a5ivLa2W4nbPCkX1sQzbe7bKzf+BFP5TlLWpnbF/0tZXl9cX2ZG3ycmN81qZYjnOtc0bg69DqSVrrFP6AlJmcl87irINmBdQ7ocVH8YkiHYUoPsTe+e2+d+AqYR9Rx7JiHW5TVvi9jhhv4SK7o30xTYyPZcY0Zs/5V90580MxcorhWEcsN8bHumM4po9l5jGyOBcxO2d5eH11mWXTex3W/9w/Vne028PZeM/r9nm6bB0xPjKIbYjxkUfsS5FZUd4YH+uNtlr5RfU5gzLeMW+0Kdo9r9Aj8GUKONJ7ecJvKDzeO218QC1snd47op3t2uMtj3/oeDl2zgvHh9A7eSzL8tgRR1CezuJi/vjA+MNqaf1hzdoX2+YPcbbMWI6nifZ5W2NZsb4Y9vxWR0wf4+u2x8s3UYnlOGuLL/sPrQqzIlFyNtm6YxuKfGXx8cMy2lEU9vosb7QpxkceHrb0MVyUN8ZHGyy/+cyZRt95uUWiXsXWaJ+z9H5fR5baFPi3SPoXSY9I2lW8Wd+zd4a+ZV8dp3UhrXdsO8dwtK0oPqbJC1u+KJhFdeSls/JivD9s9sBkxc7S+UMa7ciLj2Vmy/E6Z5UVy43hOnVXaU9WTM0uP4rakSdQZcyK0kdBtPxed7Zeu7YjG19Ubmx3DEdfxLwxPsvDp4qsbvszG4vyxvhsvd6GyNbKivXFD5poUyw3xkebYjmRpddX5dyWwB8q6V8lbZN0uKT7Jb2iTOQR+CruHE4af/CyD1G2hUXpYnwMZ/PXuV5UOXXq9LSx7hj2+/Fs9000XKDtOh5F+WN8DMe8MRzTeDgrXBbvh6eJcXYvGx+vq4S9/LyyPK6Mh+ePdXmc5zfhtvtFabLpY33GxPNn0+XFexqrK5Zj13WPtgT+tZJuCYJ+sST7KzwQ+LquJf2YCVQRomXxabPuojZttk2Lqq9pOW0J/DslXRXU/Fcl7QnXHtyRDFydTCZFviMeAhCAAARyCHRd4F3oxQg+x3tEQQACECgh0JbAM0VT4hRuQQACEFgEgbYE/ock/ZukreEl6yvXh+s5AUbwi3A3ZUAAAmMi0JbAm4S/VdLD6ds0l+Zo+kFRCPyYuiVthQAEFkGgTYE/SMBnXSDwi3A3ZUAAAmMigMCPydu0FQIQGBWB3gi8pG8nY1fnOB+YI8889
XQpzxjbbPzH2O4xthlfS1X0xnRz8IeBGNsxxjabj8fY7jG2GV+PTdFK2jvGB2CMbeahL3kIBnhrjH18jG2e2XXHCGWMbUbgZz4Kg0owxj4+xjbP7LS25MHYjjG22Xw8xnaPsc34emyKRnshAAEIQAACEIAABCAAAQhAAAIQgAAEukig1q5RXWxARZtOkXSbpK9JekjShSnfiyR9SdL+dH5hxfL6lMw2j7lX0koy2tY3+mraKeyv0lpHfWpPFVuPk3SDpG9I+rokW7xv6L5+f+rbD0r6nKTnp7WshubrqyV9S5K1048i3x4i6U9TX98n6QzPMIZz7V2jegzlJcG5R6c1fmyHrD8K2yHatoiX97iNRaZfJOmzQeD/WtIvp8R/Lum3izL2OP4aSe9J9tuOaCb4Q/b1SyU9KukFqc3m43dLGqKvz0nPchT4It/ael5fkGRC/5o0sOlxt65neu0liesV3+nUn5f0prTnrYm/HXa2PXCHdLxM0pclvTEJvHX070iy1UrtyPaBFN3r07FJ7Kyt8TDfDtXXJvDfTP+lmG/tv7U3D9jXp2ZG8EW+/QtJvxI6QUwXoocZrLpr1NBab53jcUnHSPqv0DgThHgdbvU2aNMUZ0p6Q3roX5z+XfUG2dRVHAl5fJ/Pp0u6S9Kn09SU7Yx2ZMa3Q/S1TTt+Ly1Zcp2kIfs6K/DxuY2+tQ+614fObIOdV4frQQfHKPBHSbpb0juSZ2PHsKj/HJDHz5P08dSeMQm8PcDPSDortf1KSR/OCPzQfG3vjm6VdIKkwyTdJOn8AX+Ylwl89O2oBT777/nMjb3TA9PXk3V828jc5qT9iP+yDW2K5iOSnkiLiz0l6fuSbGQ39Cmak1Ob3cdnS7p54NNxvyjpk95gSe+S9IkB+zor8EXP8ainaGrvGhU6UN+C9m/btZI+mjH8isxLVntZM8TDR/DWtr/JvGT9nQE2+HZJ21O7LpNkfh6yr+2/Fft22BHphaK9ZP69Afs6K/BFvn1b5iWrTd2N6qi1a1SPydg83FSSfVXqvvRnbT8+vYS0r0n+XXpJ1eNmFpoeBX5bmqN+JAnADxfm6u8Nm4e3NUnM3zZdYVMYQ/f1h9LXQu2dyl9KMr8O0df2FdAnJf0g/Yd6QYlvbWD3Z2lXvAfGNP/e30cXyyEAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCGwWgf9LXye9X9I9kn5qRsW2oFeV79T/PV9Xm0GS2xCAAASWTMDWNfHDFq76il8UnLM/PilIJgS+iAzxEIAABDaJQBR4+ym8/WDIDlvPxxZtslG9/Xjk7Sn+ekn/m0b99stCO3amNPZfwB+mOBN4W5bZflX4sCRbSsAOW77a8v1z+oHSb6Z4W0LiH1K59iMeT59uc4IABCAAgboEfIrGNs3477QqpZVhS1zYSpx2+IqF9ovB7Aj+5yTdkX4+b2ltUwY7TOD/OIXt18T2C2I7bGPs3Slsv8K0X6La5iS/L+nSFG8fAraePwcEIAABCDQgEEfwtjCdrXViQm6Ltu0Jyz7YqN0W+MoKvIn4b+TUbwL/uhR/Uljx0JY2thG9LyVhG1icK8k2eLBlFWxNGVuCgAMCEIAABBoSiAJvRf27pBPTTkG21Z8JvR0HkrjXEXhfk9v+A7D8dvxt2qQiXR50+pH0YWHibyslckAAAhCAQAMCUeB/LC09a1MktrHEx1K5P5MWcTNxt8W8Hgv12X6/RVM0eQJvUzQ2z+8fHD+aNurYkubnrej35qwIGqokCAEIQAACVQj4HLyNmu0lqS29aoeNuv8pvTz9VNrU2gTeDtv71V6E+ktW2+fWNjq3Mv4gpYnfookj+OelNPbi1sqwTdJt+71fS9e2cbgtA2zz8hwQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAKBw
P8Dajpbh65w6sgAAAAASUVORK5CYII=) Let's also plot the data and what the Neural Network has learned. ###Code for _ in range(100): x, y_target = sample_data() y = mlp.forward([Var(x)]) plt.plot(x, y_target, 'b.') plt.plot(x, y[0].v, 'r.') plt.title('True (blue) and MLP approx (red)') plt.show() ###Output _____no_output_____ ###Markdown [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/real-itu/modern-ai-course/blob/master/lecture-06/lab.ipynb) Contents and why we need this labThis lab is about implementing neural networks yourself from scratch. All the modern frameworks for deep learning use automatic differentiation (autodiff) so you don't have to code the backward step yourself. In this version of this lab you will develop your own autodif implementation, and use this to build a simple neural network. Once you've done this lab you should have a very good understanding of what goes on below the hood in the modern framework such as [PyTorch](https://pytorch.org/), [TensorFlow](https://www.tensorflow.org/) or [JAX](https://github.com/google/jax). In particular the code we'll develop will look quite similar to the pytorch API. External sources of information1. Jupyter notebook. You can find more information about Jupyter notebooks [here](https://jupyter.org/). It will come as part of the [Anaconda](https://www.anaconda.com/) Python installation. You can also use [colab](colab.to), which is a free online jupyter notebook.3. [Nanograd](https://github.com/rasmusbergpalm/nanograd) is a minimalistic version of autodiff developed by Rasmus Berg Palm that we use for our framework. Nanograd automatic differention framework The [Nanograd](https://github.com/rasmusbergpalm/nanograd) framework defines a class Var which both holds a value and gradient value that we can use to store the intermediate values when we apply the chain rule of differentiation. 
###Code
# Copy and pasted from https://github.com/rasmusbergpalm/nanograd/blob/main/nanograd.py
from typing import Union
from math import tanh


class Var:
    """
    A variable which holds a number and enables gradient computations.
    """

    def __init__(self, val: Union[float, int], parents=None):
        assert type(val) in {float, int}
        if parents is None:
            parents = []
        self.v = val
        self.parents = parents  # list of (parent Var, local gradient) pairs
        self.grad = 0.0

    def backprop(self, bp):
        # Accumulate the incoming gradient, then propagate it to each parent
        # scaled by the local gradient (chain rule).
        self.grad += bp
        for parent, grad in self.parents:
            parent.backprop(grad * bp)

    def backward(self):
        # Seed the backward pass with d(out)/d(out) = 1.
        self.backprop(1.0)

    def __add__(self: 'Var', other: 'Var') -> 'Var':
        return Var(self.v + other.v, [(self, 1.0), (other, 1.0)])

    def __mul__(self: 'Var', other: 'Var') -> 'Var':
        return Var(self.v * other.v, [(self, other.v), (other, self.v)])

    def __pow__(self, power: Union[float, int]) -> 'Var':
        assert type(power) in {float, int}, "power must be float or int"
        return Var(self.v ** power, [(self, power * self.v ** (power - 1))])

    def __neg__(self: 'Var') -> 'Var':
        return Var(-1.0) * self

    def __sub__(self: 'Var', other: 'Var') -> 'Var':
        return self + (-other)

    def __truediv__(self: 'Var', other: 'Var') -> 'Var':
        return self * other ** -1

    def tanh(self) -> 'Var':
        # Compute tanh once (the original called it twice); d/dx tanh(x) = 1 - tanh(x)^2.
        t = tanh(self.v)
        return Var(t, [(self, 1 - t ** 2)])

    def relu(self) -> 'Var':
        return Var(self.v if self.v > 0.0 else 0.0, [(self, 1.0 if self.v > 0.0 else 0.0)])

    def __repr__(self):
        return "Var(v=%.4f, grad=%.4f)" % (self.v, self.grad)
###Output _____no_output_____
###Markdown A few examples illustrate how we can use this:
###Code
a = Var(3.0)
b = Var(5.0)
f = a * b
f.backward()
for v in [a, b, f]:
    print(v)

a = Var(3.0)
b = Var(5.0)
c = a * b
d = Var(9.0)
e = a * d
f = c + e
f.backward()
for v in [a, b, c, d, e, f]:
    print(v)
###Output _____no_output_____
###Markdown Exercise a) What is being calculated?Explain briefly the output of the code? What is the expression we differentiate and with respect to what variables?
Exercise b) How does the backward function work?For the first example above, execute the backward function by hand to convince yourself that it indeed calculates the gradients with respect to the variables. Write down the sequence of calls to backprop for the first example above. Exercise c) What happens if we run backward again?Try to execute the code below. Explain what happens. ###Code f.backward() for v in [a, b, c, d, e, f]: print(v) ###Output _____no_output_____ ###Markdown Exercise d) Test correctness of derivatives with the finite difference methodWrite a small function that uses [the finite difference method](https://en.wikipedia.org/wiki/Finite_difference_method) to numerically compute the gradient:$$\frac{\partial f(x)}{\partial x} \approx \frac{f(x+dx)-f(x)}{dx}$$for a very small $dx$. ###Code def finite_difference(fn, x_val, dx=1e-10): """ Computes the finite difference numerical approximation to the derivative of fn(x) with respect to x at x_val: (fn(x_val + dx) - fn(x_val))/dx """ pass #Insert code ###Output _____no_output_____ ###Markdown Use your finite difference function to compute the gradient of $f$ with respect to $a$ and $b$ in the following function: $f(x) = a \cdot b + b$, at a=3 and b=5. ###Code # test function - try to change into other functions as well def f(a, b): return a*b + b pass #Insert code ###Output _____no_output_____ ###Markdown Write the same function using Nanograd `Var`s and verify that Nanograd computes the same gradients ###Code pass #Insert code ###Output _____no_output_____ ###Markdown Create an artificial dataset to play withWe create a non-linear 1d regression task. The generator supports various noise levels. You can modify it yourself if you want more or less challenging tasks. 
###Code from math import sin import random import tqdm as tqdm import matplotlib.pyplot as plt def sample_data(noise=0.3): x = (random.random() - 0.5) * 10 return x, sin(x) + x + random.gauss(0, noise) train_data = [sample_data() for _ in range(100)] val_data = [sample_data() for _ in range(100)] for x, y in train_data: plt.plot(x, y, 'b.') plt.show() ###Output _____no_output_____ ###Markdown Building the neural network.We'll create a feedforward neural network consisting of a series of dense layers. See the image below. Each dense layer is just a number of artificial neurons. In the image below each column of circles (neurons) is a dense layer. It's dense because the weight matrix is dense; there's a connection between every input and every output neuron in the layer.The inputs to create a dense layer is following:1. **The input size and output size**. We have to define the number of inputs and outputs. The inputs are the number of inputs to the layer, and the output size is the number of artificial neurons the layer has.2. **Activation functions**. Each dense layer must have an activation function (it can also be the linear activation which is equivalent to identity function). The power of neural networks comes from non-linear activation functions.3. **Parameter initialization**. We will initialize the weights to have random values. This is done in practice by drawing pseudo random numbers from a Gaussian or uniform distribution. It turns out that for deeper models we have to be careful about how we scale the random numbers. This will be the topic of a later exercice. For now we will just use simple Gaussians. See the `Initializer` class below.Note that we use Sequence in the code below. A Sequence is an ordered list. This means the order we insert and access items are the same. 
![f2.jpeg](data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEAYABgAAD/4RDaRXhpZgAATU0AKgAAAAgABAE7AAIAAAAFAAAISodpAAQAAAABAAAIUJydAAEAAAAKAAAQyOocAAcAAAgMAAAAPgAAAAAc6gAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAERUUDMAAAAFkAMAAgAAABQAABCekAQAAgAAABQAABCykpEAAgAAAAMxMQAAkpIAAgAAAAMxMQAA6hwABwAACAwAAAiSAAAAABzqAAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMjAxMjowODoyNCAxNDozODo0MAAyMDEyOjA4OjI0IDE0OjM4OjQwAAAARABUAFAAMwAAAP/hCxdodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvADw/eHBhY2tldCBiZWdpbj0n77u/JyBpZD0nVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkJz8+DQo8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIj48cmRmOlJERiB4bWxucz
pyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPjxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSJ1dWlkOmZhZjViZGQ1LWJhM2QtMTFkYS1hZDMxLWQzM2Q3NTE4MmYxYiIgeG1sbnM6ZGM9Imh0dHA6Ly9wdXJsLm9yZy9kYy9lbGVtZW50cy8xLjEvIi8+PHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9InV1aWQ6ZmFmNWJkZDUtYmEzZC0xMWRhLWFkMzEtZDMzZDc1MTgyZjFiIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iPjx4bXA6Q3JlYXRlRGF0ZT4yMDEyLTA4LTI0VDE0OjM4OjQwLjExNDwveG1wOkNyZWF0ZURhdGU+PC9yZGY6RGVzY3JpcHRpb24+PHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9InV1aWQ6ZmFmNWJkZDUtYmEzZC0xMWRhLWFkMzEtZDMzZDc1MTgyZjFiIiB4bWxuczpkYz0iaHR0cDovL3B1cmwub3JnL2RjL2VsZW1lbnRzLzEuMS8iPjxkYzpjcmVhdG9yPjxyZGY6U2VxIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+PHJkZjpsaT5EVFAzPC9yZGY6bGk+PC9yZGY6U2VxPg0KCQkJPC9kYzpjcmVhdG9yPjwvcmRmOkRlc2NyaXB0aW9uPjwvcmRmOlJERj48L3g6eG1wbWV0YT4NCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIC
AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgPD94cGFja2V0IGVuZD0ndyc/Pv/bAEMABwUFBgUEBwYFBggHBwgKEQsKCQkKFQ8QDBEYFRoZGBUYFxseJyEbHSUdFxgiLiIlKCkrLCsaIC8zLyoyJyorKv/bAEMBBwgICgkKFAsLFCocGBwqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqKv/AABEIAmgEnwMBIgACEQEDEQH/xAAfAAABBQEBAQEBAQAAAAAAAAAAAQIDBAUGBwgJCgv/xAC1EAACAQMDAgQDBQUEBAAAAX0BAgMABBEFEiExQQYTUWEHInEUMoGRoQgjQrHBFVLR8CQzYnKCCQoWFxgZGiUmJygpKjQ1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3
h5eoOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4eLj5OXm5+jp6vHy8/T19vf4+fr/xAAfAQADAQEBAQEBAQEBAAAAAAAAAQIDBAUGBwgJCgv/xAC1EQACAQIEBAMEBwUEBAABAncAAQIDEQQFITEGEkFRB2FxEyIygQgUQpGhscEJIzNS8BVictEKFiQ04SXxFxgZGiYnKCkqNTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqCg4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2dri4+Tl5ufo6ery8/T19vf4+fr/2gAMAwEAAhEDEQA/APpGiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAopsis0bKjbWI4PpXiGoeKfF8fxlh8JQayBbyjfv2dB6UAe40VHbpJHbok0nmSAYZ/U1JQAUUUUAFFFFABRRVe/voNOspLq6kCRoMkk4oAsUVBY3aX9lFdQ52SruXPpU9ABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUVxnxO1DVtD8GX+r6Re+RJbJvClc5oA7OivL/gxr3iDxf4ZTW9a1ISqzFfJC46Vp/FbUNc0Dwlea3o2pfZzbKD5RXO6gDvaMgnAPIrhfhZqWs+IPBdtq+sX/nyXKnChcbTVnwl4e8QaT4j1e71vVTe2ty2beP8A55igDsaCQBkkAeporxX44a94i8PajpI03UzHZ31wqPCB7+tAHtVFQWTFtPt2Y5JiUn8hXL/EyXV7TwVe3uhah9int4y27bnPFAHXgg9DmivO/ghrOoa58N7e81acz3BkYM57816JQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFcn4+8eWfgjS0lkUT3k7bILcHlzQB1lFcFoum+MdVsRqF7rRsjcrvS2EefKB6Vk6n4s1z4a6nanxZdf2lpF5II/tm3BhP0oA9ToqG0uob60iubVxJDKodGHcGpqACiis2DXbO48QT6RE4a4gjEjgHOAaANKiiigAooooAKKKoWms2l7qdxZWziSS3GXK8ge1AF+iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK8C1D/AJOos/8Arka99rwLUP8Ak6mzHfyjQB3nxe8Saz4W8NxahozhAJQkjEZ61jand+PtV8FprOlXq2LwQGUpjPnADNXvjz/yTc+n2qP+ddBZZ/4VT/3Dm/8AQTQBieEvG+qeI/hVLqqRqdSt4mEnPVhnn9K5HwP4v8e+OtE1BLGZY5IpSouyPuH+7im/Ax3b4X+JdzEhXlA9uGrT/Zz/AORa1bHT7Yf60AQfD/4keI
rbx1ceDfHoxeEEwXGMbvStMeJvEWlfHC38N3mofarC6hMqrtxt9qoeNNOTUfj7oJ06MPcwwlpyvYA96NeYD9p3SCxCj7HjJ4oA1fi34i1/wlPpeoaVqG23uLpYZLYr1BrJ+PFvrE/hCwvrTVmtrdpYw9uo+8T3zUv7QdxD/ZehxeYu/wDtBDjPTmrXxsdT8L9PYMpX7RDznjpQB1Xw/wBL1ax0G0fUtXN7G8IKJtxtrr6x/C0iHwxpyh1LGBTgGtigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK4j4w8/CrWf+uNdvXEfGH/AJJTrX/XGgDzv4D+ONF0P4cxWd886zLIxOyIsPzrX+LfxA0LVPhnqdpayXBlkUBQ0JAzn1qb9nO2gl+FcLSQRMfNb5mQEmtv402tvH8J9XZLeJSEHIQcc0AN+ELzR/BrT3thmURMVHvVL4UeK9e8QeI/EVrr04dLOQCFAPuDNaPwa/5JBpv/AFyaub+EAJ8ZeMcdfMxQB0eqeJ9T1/xo/hrwzcfZvsnN1dgZ2+1eXfG/TNb07V/D7arqp1CF7ldilcbDmt74Ua7DY/FzxPo+onyrq4kDRFzgtij9oxlF54ZBYA/ahxn3oA7zxl41/wCEW0LTrWyTztTvkSO3j/AAn8K5n4h6J4th+Hd7dvr5nAgLSW5TG4EdM+1Y3xSvhpHxG8D6pcgNZpGqliflBNek/EeeOX4ZalMki+W9uWDA8HIoA5D4H6lBo/wUjvrxgkcTOST3PpWt5/iDxRpR1jTfEUelLIrFLXg4x0/OuB8MW897+y3cRWKmWZZGfYp5IBrqPhMfCvinwTbMSBeQDZcRNLgqw9qALHw5+ImqeJdF1uw1CPGraUGxJjAlx3rK+HvjHxl41tNWtBKqSRylEusf6nnpivQbOw8PWa6nHoccYuY4H85oznt3Neffs8Zx4i/6+z/M0AXPAnjPxDa/Ee98HeKboX0y5aKcDGBXr9eB2mf+GsZ/+uTfyr3ygAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACvnTx5cf2z+03odhcgmC22kITwT64r6Lr59+LOnT+GfjJofjEW5NhuVJ5B2PvQB9BAADAGAK86+OdjBefCvUmuEDGFd6H0Nd9ZXkN/ZQ3Vs4eKZA6sDnINeb/HjWktPAMulQqJrzUm8mKJTlvrigDk/AWta837PL6tZ6mYZ9PDbcjO5VIGP1r0fwPquqeKfhzDdz3flX06cTgfdPrXH6X4Xn8Jfs231heDbO9qZXHoSQa6X4Pyxp8MLF2kQKF5JPAoAx/ht4j8Q+Jo9e03UNSzcWrtFDchfukHGa4b4V6X4hv/H/AIhjTxC8dzA+JJiuTIM9K6r4JOja/wCJcOpzdORz15rP+DMiJ8TfFhd1UCQ/eOO9AF/xn448X+H/AIhaTpVqBJbuQrJj/XmqnjXXfiR4OlHiCWcTaTvBltgv+rUnpVzx8Qfjh4WOQV8wYPbtXc/FJoh8M9Z8/btMBGG7nNAHKeM/G2pXPwuh8Y+FNS8hFVTJFtzuJ6iuttL3Utb+G1tqFvefZb2W180y4zzjNeSvp9zpv7Kk0V1EY2Zg6r/sk8V6n4Xnhi+Edk7yoFGn9d3+zQBz3w18Qa/428A6j9q1Hyb+Kd4UugvTHfFcj8F9J8QXOqavIPELjyrlllBXJk5roP2fpEbwbq211JN7KcZ571H8DJI1vPERaRRi7fOT05oA9kQFUUMckDk+tLQCCMg5BooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAGyqzxMqNsYjAb0ry+7+D1xdeNF8THX5Vv
1Pyts6D0r1KigDivG3gO68Z6LBptxq7wwoQ0mF++w71bg8J30Hg1tCXV3JMflibbyFxjFdVRQB5x4a+Hf/AAr/AMK6xb2+oPcwzwyOyMuPmwea4b4F6Nqt5omrzaZq7WUZuiDGFzzzzXt+u6bNq2kTWVvdG1MylTIBng1y/wAPfh0/gKO4hh1R7mGd97Iy45oA0vDXguHRL6bUr24N9qcxO65cc49BWb47+G0Xi+9ttRs799N1K24S5jGTiu5ooA8w174Nx+I9DtrXVNXmlvIJBIbrHLY9q6LXvAFl4g8Cp4bu5m2RqNs3cMO9dbRQByvgvwa3hXTo4Lq/k1CaIbI5X42r6YrqqKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArmfGvhSfxdos2ljUGtbeddsgVc5FdNRQBwvw/+Hlx4DtRZW+rvcWKnIhZcc1c8deC7rxnpr6d/ar2lnKMSRqud1ddRQBxnhPwRe+FfDh0eHWHlgVSsRK/czVLwd8NLjwj4gu9Sh1mSZbxt08TL96vQKKAPO/GnwksfE2uwa7pl2+latEcm5iH3vTNZ2t/Bu58TrZP4h8Qy3lxaEESFMZxXqtFAHH+JfhzpfinwjDoupEsbdAIZ/wCJCO9c7D8JdVfw4+g6j4pnutMK7ViZeVH1r1KigDkfAfgC18EaA+kxTtdQvnO8dj2rjL/4BW6+ILjUfDmtT6Slw254Y+h9a9hooA5XSvBEOi+G59P066dLq4TbNdtyzH1rK8BfDSXwNc3b2+rvcR3TF3Rlx83rXf0UAebR/CiaP4iN4tGtSfa2OCuzjHpXpCghQCckDk0tFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFUNZ0Ww1/TZLDVbdZ7eQYKsKv0UAcBaeANd0oG30bxRNa2Kn91AUzsHpmrel/DyFdaTWPEV22r6hF/qpJBgJ+FdpRQBU1PTbfVtLn0+8TdBOhR19q8+8M/Ca48PXMkH9vzT6QzEiyIwAD2zXplFAHBeCPhfb+C9cv8AUIL6Sdbp2ZY2HCA9qrSfCO1j8Zza3p9/Jaw3JzcWyDiT8a9GooA8O+Jti918WPDVhaTG2diFjlHJWu5uvAV9rEsMXiDW5L6wRtz2xXAf61U1z4YXGteMLbxA+tyRz2rboVC8LXfW0ckVuqTSmVwOXPegDO1bw5Yav4ak0SaILaPGIwoH3QOlcV4f+E1xpEUtlea/PeaawIitiMCMV6XRQBw3w9+Glt4C+2rb3j3KXTlgrcBM9qo6T8I4NG8XXWqWWpypZ3bF5rMdGP1r0eigBsaLHGqIMKowKdRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFctrXxF8P6BrMOlajcPHdzOEjTZ94n3rqa8J+OEUZ+InhN9g3CUc4/wBqgD3UHKgjvS1UutQtdNsPtF7MsUarkljjt2rAtfiP4dur+O0FzJDJKcIZo9gb6GgDqqKxta8V6T4edF1W48gSDKsRx+dXNO1a01Wx+12Tl4cZDY60AXaKxrDxVpepao+n2kzPcR/fXb9361HrvjDSfDuBqEkjHGSIU3lfrjpQBu0VlaH4m0rxHY/a9Ku0ljH3gTgr9R2qu/jTQI5LlDqMRNqCZSGBAxQBu0Vk+HvE2leKLFrvRbkXEKttYjsa1qACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAr317Dp1lJdXJIijGWKjJxWL4X8c6J4vmuo9DuDMbU4kJ
XGDW3egNYThgCPLbg/SvFf2fY1j1jxQEAA+0n+dAHuNFYes+MNH0OZYbydmlPVIV3kfUDpUmieKdJ8Q2kk+l3IlEX30xhl+ooA2KK5y38eaBdah9hhuy11nb5W35vyrT1bW7HRLQXOoyGOI/xYoA0KKoW2tWN1pQ1FJdtsRkO/FYUHxL8Nzaklk11JBK7bVM0exWPsTQB1lFUdS1nT9IsvteoXUcMPGGZhz9Kx7v4heGrKa0iuNRRXuziIev1oA6aisDVPGujaRciC5lkdyAf3Sb8A+tX9I1yw1yBpdOl8xV4PGMUAaFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABXKzfEbw/B4mh0CS4cahM21YimK6qvCPG8Ua/tJaC6oAxReQPpQB7vRVLVNXstGtGuNQnWJB0GeW+g71j6Z8QNA1XUlsILl47h/uJMmzd9M0AdLRWFq/jLRdCvBbapdeRIRkbhwa0rfUre50/7bCWMONwOOSKALdFZGk+J9M1u4mh06YyND9/5cAVQ1j4gaDod00F7PISv3njj3Kv1NAHTUVQstb07UdLGo2l3FJakZ8wMMD61lTeP/DkOlSai+op9mjfYzj1oA6Siqmmanaaxp8d7p0yzQSjKup61boAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArwz43/APJQfCf/AF1H/oVe514N8bLlpviF4c+z2lzMLWUGVo4yQOaAPQfiH4Vh8R2FjNeaqbC2s3WV13YEmOcVwnxhvrLV/B9ldaTZlIrS5QLc7djKe2Pap/jbNq0p8P31nFcS6KjrJdRRqdx57j6VW+KvilPEPw1hPhnTJ5bNJ4zMTCVZMeg70Adrr3hqPxx8J4oL4lrkWvmJIOu4Dis34SeLRL4CubO+jEV1ogaN4zwWAya6n4e6k2peD7RjbvCscYQCRcE8eleX+LPCepaX8YrUeH5CLfVj5l1EOBt70Adn4YtnsPDWveKQgSe/V7mI45UAH+tZHw91HxBf+Hn1L+x7e/a+kLSSyPnPJ4xXplxpEL+HZdKgAjieBogB2yMV4D4U8Xaz8HtcvtA8R6bc3GkmUtDcRoWwM0AdV4F+H/iHRPiZqeqXcCW+jaijbrZJMhWPoKw/AvhLTb74u+J7KdWfT0kbFszHGfWvSPCvjC+8V3zapb2slvokUZ/1i4Z2x1xXBfD3V0i+M+vvLaXSQ3kpMMjREA/jQB6/4e8L6R4WtZbfQ7RbWKV97qp6mteiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAIbz/jxn/wCubfyrxX4AjOseKQDgm4PP417LqlwlrpVzNIGKrG2QoyeleJfAqaaHWPEsUlrcQS3EjNCZIyAeaAOt07SNL8F+LdTvtQ1BtSvNTICQ7d5i9sdq5fwQZbL486zDFF9kjuV3PAD8v5VS8A+IJPDPj/XU8a211JfXUuLeURF1IzwB6UaXrOoW/wAf7+6vdKmjM6qsJVCVK+pNAGj8RdNTwJ8SdK8ZWMBeGeTy7pP4Vz3rsfHVyniGz0vQrUK/9qFZQ4Odqjmt3xr4fh8UeD7zT7j5N8e9WxypAzXAfBDStTks57rXSZltZDHZuxzgA4oAl8d317beM/DHhnTLdJ7ZkzLbltocr0pvxD8LeIfGvhwWUfh+2trmJw0E6SAFMe9Hxu8Pay0mmeKvDMTSXuktuZV6suelGifGxtcso7GHRbpNbYBWiaMhVPrmgCj8RNAu4/gLEniPLapZ7FEiv905/Wtvwf8ADnQNW8EaZc6raC7vBEGEzMc7u1V/ixLewf
CN7K/Sa61CdlbbEhboc4rpPhvqa3vw7tfJililgg2lZU2kMBQB5xp3jaT4d+N9R0vxvprixumBivdm4BR0GfpXq3g6LRZrebU/Dlyk1peEOFQ52n+lcxZeIdE8X2l3o3izTnea3LBmkh+8PY1m/Bfw1c6DrOvPa+dHossubSKXPHPvQB69RRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAV4X44/5OP0D/cX+le6V4F42vd/7QejXaWl09vAFWSRYiVB470Aeg+O/DFpf65pmuapqrW9rp8gc2ueJfbHeuC+LN9Ffaj4c1fTLU2yJdBY7jbsY89CKn+K11qVl8S9B1O9guLjw7FtdkiUnDdyRVX4veJH1zTvD93o+mTyaXHdh2kERDA8cbaAO0+KHhJPFnw9F5gm/s4RPEy9WIwcU7wX46jvvhQdSliVbiwi8mSHvuGB0rrvDV6dV8N20ssBiVowuxxg4x3FeN23hDUdK+NEukafIX0e5/wBIuY+wB56UAdHdrceCfg5qepWmEvr0mZW9N5GBVvQYtauPBNvat4btp4ru3DSO0gJcsOtdV428NL4i8C32iwIA0kO2EehHSvIvAnxW1HwVYjwz410u732bGKC4jjLbwOgoA3vh54C1rw5pWv2XiGMLpkyvJbwrJnacVjfBHwbpmveGNWTWEN1bfbXRIHJwuDXo1hrepahoepa1qkEkFjJAwgtwMt064rjv2f7/AMjTdT026tbiC4a7eVfMjIBUnigD1jRtFsNA01LDSoBBbJ91Ac4q/RRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABTGhic5eNGPqVBp9FADXijkTY6Ky/3SMimC1t1i8tYIhH/dCDH5VLRQBxHxB8dzeBoLc2WjyX/mnlYlPyj8Kg8FLqHifVD4p1q1NowUx2kLDkIeua7x40f76K31GaUAKMKAAOwoAWoZ7O2ul23MEco9HQGpqKAGRwxwxiOKNUQdFUYFAgiVtyxID6hRT6KACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAAgEYIBHoaYsMaHKRop9QoFPooAie1t5JBJJBGzjoxQEih4oU3TeShcDOdozUtFAHkF78Utc1bW7rw7pXh+ZJGby0uGUhSvQmvS/DmjR6DocNjGc7fmY/7R5NaIijDbhGob1CjNPoAQgMCGAIPY1BHYWcMxlitYUkPVlQA1YooAa8aSffRW+ozQsaIMIiqPQDFOooAryWNrKQXgjJBznaKnVVQYRQo9AMUtFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUwwxFtxiQn1Kin0UAMkhimTZNGki+jKCKb9mg2BPIj2r0XYMCpaKAPPvHvxIufBepW1nZ6JNfrLjLRqcL+VXvA1lfX0s3iXW4fIvbxdixf3Yx0rsWijc5dFY+4zTgABgDAoAKrzWFpcOHntYZGHIZkBNWKKAE2Ls2bRt9McU1Yo0OUjVT6hQKfRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAGbr2u2vh3SJNRv1laGPqIU3N+VcN/wvbwr/z7at/4BNXpTIrrh1DD0IzUf2W3/wCeEX/fAoA85/4Xt4V/59tW/wDAJqP+F7eFf+fbVv8AwCavRvstv/zwi/74FH2W3/54Rf8AfAoA85/4Xt4V/w
CfbVv/AACaj/he3hX/AJ9tW/8AAJq9G+y2/wDzwi/74FH2W3/54Rf98CgDzRv2gfBccwhkOoJKRkRtakMfwpf+GgPBn/UR/wDAU1teOPAVvr8C32mRRW+q24zFIEHzj+6a8/0+/jmeW01K1htL+24mjeMAcdx7VyYivOjqo3RMm0dKf2gfBaqWY6gFHJJtTgU6L4++ELiMSW8epTRno6WhINc1puk3HjrUzY6fDHBpULYubryx8/8Asj1Fex6XoWm6Pp0VlY2cMcMS4UBBWlGpOpHmkrDTbOE/4Xt4V/59tW/8Amo/4Xt4V/59tW/8Amr0b7Lb/wDPCL/vgUfZbf8A54Rf98CtxnnP/C9vCv8Az7at/wCATUf8L28K/wDPtq3/AIBNXo32W3/54Rf98Cj7Lb/88Iv++BQBwul/GTw5q+pQ2Nrb6kJZm2qZLQqM+5rv6jFtApysMYPqFFSUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABVXUr+LS9OmvbgOYoV3MEXJx7CrVIQGGGAIPY0AebH46+FlYg22q8f9ObUn/C9vCv/AD7at/4BNXo32W3/AOeEX/fAo+y2/wDzwi/74FAHnP8Awvbwr/z7at/4BNR/wvbwr/z7at/4BNXo32W3/wCeEX/fAo+y2/8Azwi/74FAHnP/AAvbwr/z7at/4BNR/wAL28K/8+2rf+ATV6N9lt/+eEX/AHwKPstv/wA8Iv8AvgUAec/8L28K/wDPtq3/AIBNR/wvbwr/AM+2rf8AgE1ejfZbf/nhF/3wKPstv/zwi/74FAHnP/C9vCv/AD7at/4BNR/wvbwr/wA+2rf+ATV6N9lt/wDnhF/3wKPstv8A88Iv++BQB5z/AML28K/8+2rf+ATUf8L28K/8+2rf+ATV6N9lt/8AnhF/3wKPstv/AM8Iv++BQB5z/wAL28K/8+2rf+ATUf8AC9vCv/Ptq3/gE1ejfZbf/nhF/wB8Cj7Lb/8APCL/AL4FAHnP/C9vCv8Az7at/wCATUf8L28K/wDPtq3/AIBNXo32W3/54Rf98Cj7Lb/88Iv++BQB5z/wvbwr/wA+2rf+ATUf8L28K/8APtq3/gE1ejfZbf8A54Rf98Cj7Lb/APPCL/vgUAec/wDC9vCv/Ptq3/gE1H/C9vCv/Ptq3/gE1ejfZbf/AJ4Rf98Cj7Lb/wDPCL/vgUAecN8ePCaKWeDVFUckmzbAqJP2g/BMqb4nv3Q/xLakivSZtPs54XimtYXRwVZSg5FeP+J/Csnga+a+0+1SfQ5mzJGIwTbk+lZVZTjG8FcTubP/AA0B4M/6iP8A4CmkT9oHwXLKYojqEkg6otqSR+FcvqOsWdrYRy2cMNxNcYWCNIwSSa734e+B00WzbUtXgifVLwbpPkHyDsBWGHxE61242Qotsz/+F7eFf+fbVv8AwCaj/he3hX/n21b/AMAmr0b7Lb/88Iv++BR9lt/+eEX/AHwK7Cjzn/he3hX/AJ9tW/8AAJqP+F7eFf8An21b/wAAmr0b7Lb/APPCL/vgUfZbf/nhF/3wKAPOf+F7eFf+fbVv/AJqP+F7eFf+fbVv/AJq9G+y2/8Azwi/74FH2W3/AOeEX/fAoA85/wCF7eFf+fbVv/AJqP8Ahe3hX/n21b/wCavRvstv/wA8Iv8AvgUfZbf/AJ4Rf98CgDzn/he3hX/n21b/AMAmo/4Xt4V/59tW/wDAJq9G+y2//PCL/vgUfZbf/nhF/wB8CgDzn/he3hX/AJ9tW/8AAJqP+F7eFf8An21b/wAAmr0b7Lb/APPCL/vgUfZbf/nhF/3wKAPOf+F7eFf+fbVv/AJqP+F7eFf+fbVv/AJq9G+y2/8Azwi/74FH2W3/AOeEX/fAoA85/wCF7eFf+fbVv/AJqP8Ahe3hX/n21b/wCavRvstv/wA8Iv8AvgUfZbf/AJ4Rf98CgDzn/he3hX/n21b/AMAmo/4Xt4V/59tW/w
DAJq9G+y2//PCL/vgUfZbf/nhF/wB8CgDzn/he3hX/AJ9tW/8AAJqP+F7eFf8An21b/wAAmr0b7Lb/APPCL/vgUfZbf/nhF/3wKAOW8K/EjRvF9+9ppcN6kiruJuLcoMfU111MSGKM5jjRT6qoFPoAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDk/FXxG0bwfeR22qRXru67gbeAuPzFYH/C9vCv/Ptq3/gE1ekPDFIcyRo5/wBpQab9lt/+eEX/AHwKAPOf+F7eFf8An21b/wAAmo/4Xt4V/wCfbVv/AACavRvstv8A88Iv++BR9lt/+eEX/fAoA85/4Xt4V/59tW/8Amo/4Xt4V/59tW/8Amr0b7Lb/wDPCL/vgUfZbf8A54Rf98CgDzn/AIXt4V/59tW/8Amo/wCF7eFf+fbVv/AJq9G+y2//ADwi/wC+BR9lt/8AnhF/3wKAPNP+GgvBQlMJa/Eq8mM2p3D8KX/hoDwZ/wBRH/wFNXvHXgFb2T+2/D0EMepxDLpsGJ1HY1xmn6nZ3lvI1xBDbTwcTxSRgFCOv4Vx4jETo68t0TJtHSN+0H4JjQvI1+iDqzWpAFPT49eEpUDxQ6pIh5DLZkg1z+geHZfH2oB3t0t9Cgb5mMYBuCO30r2K10uxsrWO2trSGOKNQqqEHArelOc480lYauzz/wD4Xt4V/wCfbVv/AACaj/he3hX/AJ9tW/8AAJq9G+y2/wDzwi/74FH2W3/54Rf98CtRnnP/AAvbwr/z7at/4BNR/wAL28K/8+2rf+ATV6N9lt/+eEX/AHwKPstv/wA8Iv8AvgUAec/8L28K/wDPtq3/AIBNR/wvbwr/AM+2rf8AgE1ejfZbf/nhF/3wKPstv/zwi/74FAHntv8AHDwxc3EcMdtqgZ2CjNmwFeiRSLNCkqZ2uoYZ64NNFrbg5EEf/fAqXp0oAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArkPGPw80/wAWywzmVrO6Q4aaIcundTXX0Umk9GBS0nSbPRNNisdPhWKGIYAA6+5q7RRTAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKo6nrOn6PAZtSu4oFxkB2ALfQd6AL1FebX3xk06SV7XQLG6vbodC0RCH8azm8f/EG4407wrauw6+ZKRx+dUot7ITaR61RXkq+PPiNbc6j4UtEU/d8uUn+tXLX4xRWTrB4o0u5tJmOAYYy6j6mhxa3QKSZ6dRWXo3iTStehD6beRykjJj3DcPqK1KkYUUUUAFFFFABRRRQAUUUUAFFFFABUc8EV1A8NwiyRuMMrDIIqSigDiNB+F2k6F4mm1aN3mGc28D8rBnriu3oopJJbAFFFFMAooooAKKKKACiiigAooooAKKKKACis7Vte0zRIDLqV3FDgZCsw3H6CuEvPjHaXUjW3hrTrm8uVOMyxlU/OmlcD0yivJW8ffES4/5B/hS0cDr5kpH9aF8ffES3/wCQh4VtEz08uUn+tPkl2J5l3PWqK8ztPjJZ2zrb+I9OurO4J6xxlkH413ek69pmtwCTTbyKfIyVVhuX6iptYo0aKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArivE/wy0vxJrMGoGR7Vgw+0LEMCdfQ12tFJpPcCCzs7fT7SO1s4lihjXaqqOgqeiimAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFQi8tjcfZxPGZv8AnnuG78qAJqKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiii
gAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAoorg/Hfiq5jnTw74fbOo3PEki/8ALBT3ppX0Buw/xV4+NrdHR/DUYvtUfjI5SP6n1rmrXwRJqE4vfGV9Jqk+dywsSFiPoK29B0C10Cz8uFd9w/M0zcl27mtOuuFJR1Zzym3sRW1pbWcIitbeONB0AUVNn04+lJRWpkLk9+frTJYop4mjmhjdW6gqKdRQByt/4FtxMbzw1cyaRfdfMiJ+b2xWr4c8eXen3sejeNIxb3B+WK6/hk9ye1atU9V0mz1uya1v4hIpHynup7HNZTpqWxpGbR3asHUMpBUjII70tea+C/EN3oOrf8It4glMgJ/0G5f/AJaD+7+FelVyNNOzOhO+oUUUUhhRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAhIVSWOAOSTXBeKPH0xuzo3hGIXuoNw0o+5EPXPrUfjrxLc3d+nhjw8+bqb/j4mT/AJZL3Bp2i6JZ6BYi3skG5uZJDyWPfmtadPm1exnOfKYll4HS5uBfeK7uTVrs8gSEgR+wrqILeC1iEVvBHGi9AFFPorrSS2OdtvcXJ7cfSjJ78/WkopiGTwQXMRiuII3RuoKiuWvPAsdtObzwndyaRd53N5ZJEh9DXWUUmk9xptbGf4Y8e3Ed8ui+L4hZ33SKb+CUe59a9ABBAIOQehrgNZ0Sz16xa2vUG7rHIOCjdjmo/A/ia5sNQPhbxHIftMYzazt/y1T3PrXJUp8uq2OiE+bRnodFFFZGgUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBxfjnxNdWl9ZeG9FIGq6mCY2P8CDqam8MeAotBvBf3V/Nf3xB3SyH1rnPFagftA+EWHU2k39a9RoAKKKKACiiigAooooAKK5Dxl8S9D8D3dta6sLiS4uVLRx28e8kDvXOf8AC/vDH/Pjq3/gKalzitG0NJvZHqVFeW/8L+8Mf8+Grf8AgIa67wZ460jxzYz3OimUC3fy5UmTayn6UKUXs7g01udJRRRVCCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiijOelABRRRQBleJtdg8N+HrrVLnlYEyF7sfSvO/A+mTrbT65qZMl7qLGRXbqIz0FT/Fa7fUda0TQbVtwkn33SD+5xXQpElvEkEfCRLtX6V0UY/aMaj6C0UUV0mAUUUUAFFFFABRRRQBheMNEfWdEL2Z8vULX95bzDquOTXT+AvEqeJvDENw3FxD+5mU9dy8E/jVYcnB6Hg1yvguX+wfitq2nyHZaXqKbZe27vXPWjpzG1N9D1miiiuY3CiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKOtFABWF4x8QxeGfDVxfynD42RD1c9K3a8p+JM51rxzovhtTugYGabHRSDxmmld2E3ZXHeCtIlstOl1PUBnUdRbzJye3pXSUp4AX+6Av5Uld6VlZHI3d3CiiimIKKKKACiiigArnPGukve6SNQsspe6efOV16sB/DXR0qhWO1xlTwQe4pNXVmNOzuaXg3xCviXwxa3/AmZMSp3VvQ1u15R8NpzoXjjWdBnYj7ZIbmBD2UZ6V6vXA1Z2OtO6uFFFFIYUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHl/iz/kv/AIQ/69Jv616hXl/iz/kv/hD/AK9Jv6
16hQAUUUUAFFFFABRRRQB418QgP+F6+HMgH/QZOCM9zW0VTJ/dp/3yKxfiF/yXXw5/14yfzNbR6mvzDiv/AH9f4V+p9Fli/c/MVFTzB+7Tr/dFYHwg1jS9K1vxguo39taM2ogqksgTIweRmt9Pvj614NceDv7b8Z69rV2jPZW98ItqkjLnpn2ro4R/3mp/h/UzzRfu4+p9Uf8ACZ+Gv+g9p/8A4EL/AI0f8Jn4a/6D2n/+BC/415Jpuj+CtPnhsPGPhKC2lkAMd3EWaFl9S2eK7u1+FXw8vrdZ7PRLOeJujxuxB/Wv0c8E6D/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xo/4TPw1/0HtP/wDAhf8AGsX/AIU/4E/6F63/ADb/ABo/4U/4E/6F63/Nv8aANr/hM/DX/Qe0/wD8CF/xr5e8b/FvxHoPxIu20PWzcWCt8kaNlCK+hZPhF4DjjZ28PW+FGTgt/jXx58Rk0qLxxfQ6DD5NpG5VUz0IoA9j8OftSz+YkXiLTUCDrLDkk16z4b+M/g7xKwW11EQOe1x8nNfDVOQ4bv8AgaAPr2W7g1f48XgtJkuIoLFHDo2VzgV2pOTmvAv2fkurTxXqH9o7sy2w2FjkkV75XZS+A5qnxBRRRWpmFFFFABRRRQAUUUUAFcP4puoNJ+KfhG7uZkghlkYSSOcAfU13FeO/tAWM2qy+HLG0QySvIflHp3rOr8DLh8R7ufGfhoHB13T/APwIX/Gj/hM/DX/Qe0//AMCF/wAa8d8G6L4Ge1trHxL4bgjlb5FvNxMbt3BOeDXo6/CHwGyhl8P2xBGQQzc/rXEdRt/8Jn4a/wCg9p//AIEL/jR/wmfhr/oPaf8A+BC/41i/8Kf8Cf8AQvW/5t/jR/wp/wACf9C9b/m3+NAG1/wmfhr/AKD2n/8AgQv+NH/CZ+Gv+g9p/wD4EL/jWL/wp/wJ/wBC9b/m3+NH/Cn/AAJ/0L1v+bf40AbX/CZ+Gv8AoPaf/wCBC/40f8Jn4a/6D2n/APgQv+NYv/Cn/An/AEL1v+bf40f8Kf8AAn/QvW/5t/jQBtf8Jn4a/wCg9p//AIEL/jR/wmfhr/oPaf8A+BC/41i/8Kf8Cf8AQvW/5t/jR/wp/wACf9
C9b/m3+NAG1/wmfhr/AKD2n/8AgQv+NH/CZ+Gv+g9p/wD4EL/jWL/wp/wJ/wBC9b/m3+NH/Cn/AAJ/0L1v+bf40AbX/CZ+Gv8AoPaf/wCBC/40f8Jn4a/6D2n/APgQv+NYv/Cn/An/AEL1v+bf40f8Kf8AAn/QvW/5t/jQBtf8Jn4a/wCg9p//AIEL/jR/wmfhr/oPaf8A+BC/41i/8Kf8Cf8AQvW/5t/jR/wp/wACf9C9b/m3+NAG1/wmfhr/AKD2n/8AgQv+NR3HjPw39ml269p4Ow4P2hfT61k/8Kf8Cf8AQvW/5t/jUdx8IfAqWsrDw9bghCeGb0+tAHzbf/GbxX4f8a3zWWrG8s1lOyJmymPau+8NftSxSSKnibTRCo4LwZOa8G8W2Edv4v1C10+ErFFKQqLzgVmiwZbdZ5nVYycEZ+YfhQB9x+HPi14Q8TKDZapHEx6LOQhrkbS4i1T4u6ncW8iyx2x2h1OR+FfJwuLazn3WoaXjhmJGDXvHwCklVLh7lmd7v5lZuuK0pfGiJ/Ce2nqaSg9aK7TlCiiigAooooAKKKKACiiigDjrmWK0+PGkXDOqM9mY8E9c17DXzn8V9NvdQ8e6ZLpErxXlpb+cWTrtHpXb+Ffibd21jbL4pt28mUARXcYzj/rp6GuKp8bOqHwo9VoqG0vLe+t1uLOZJ4m+66HINTVmWFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQB5f4s/wCS/wDhD/r0m/rXqFeX+LP+S/8AhD/r0m/rXqFABRRRQAUUUUAFFFFAHlfxJ8HeKtS8c6T4h8JxWk7Wdu0Tx3D7c5NY39lfFr/oE6R/3+r22ivPxOW4PFT9pXpqT2ub069WmrQdkeJjS/i0rA/2TpHH/Tatv4c+ANYsNI8RReMY7dZdZn8zZA24IMHpXqNFVhsvwmEk5UIKLYqlepUVpu55p4Z8qG6ufAniqFLiNMmyaQf62Lnv61JdeC9c8K3JvfA94ZIAf+QbO37sD2rY8f8AhuTVNPTU9MJi1OwPmxSJ95lHJT8a0fCHiSPxNoUd1wlynyXEXeN/Q13GJk+H/iRYX85sNZjbS7+P5XWcbUZv9knrXZqwZQykEEZBHesfxB4U0jxLb+XqlqkjqP3cvRkPqDXFtaeLvALtJaStrmjr8ziU5ljHoooA9NornfDnjfR/Ekai2mMFyetrP8sg/CuioAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKbJIkUbSSsERRksxwAK4fXPiREt0dN8K27apqDcKyDMSn3agDsb/ULPTbVp9QuI7eEdWkbAr4y+MOhWTeJrjWfDVpcnTpmJklZPk39yD6V9JWPgDUdeuhqPjq9afdydNRswrW74s8FWXiHwPc+HoI0tonj2xbR9w9qAPgOrOnWcmoalb2kIy80gUCtnxd4L1Pwfrk+nahHuaLJ3JyNvYmui+B/h99d+JunnZvitHEsg9qAPb9Q0ePwX4i8N3aLhNQgS2fj7rACvQnADnByPWo/iX4dk1rwsZLJN15YnzbdfcVl+F9Xj1rw9BPG2ZIh5Uw9HHWuqjLSxhVXU1qKKK3MQooooAKKKKACiiigBRyRXGwWq+JPjAtsQHTSYyX9twrqNT1CDSdKnvrptsUSnn37VU+FGjTra3niLUEKXmpOeD/AHAeKxrStGxrTWtzM0PTLS21/VfA+sxB7F2M9k7ddzcnB9q0LbUtV+Hl2tjrRlv9GdsQXYGXj9m9hUvxQsJLGOy8V2Kk3GlSBmRf+Wik45rsbSW08Q6DDLIiTQXUQLKeRyORXIdBbtLy3v7VLi0lWWJxlXU5BqavOLrTNU+Ht4+oaJ5l7ojnM9n1MI9UFd
roevWHiCwW60+UMCPmQ/eQ+hFAGlRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABTZEEkTIejAg06q99f22m2b3V9MkMKDJZzgUAfIvxv8Ban4Q8TyatYoxsLsk+cozg+9ePs7OxZjknrX2drkF58XbebSYYTaaBn57lxh3YdNvtXzJ4++GmseCvEE9m9tLPagkxTqpIK0AcbEnmTIg/iYD9a+nfC2m/8ACI6r4VgYYgvrUszdlPpXzbo0Rn12xiAyXuEXH/AhX2f4/wDC883gOym0xB9r0xUcEf3APmqouzuJq6sbB60lUND1aDXNEtr+2PyuuCO4I4NX67zkCiiigQUUUUAFFFFABSqu5gB3pKo63qkei6Jc30jBWRD5YP8AE3YUDMDw/F/wkPxnk1FQHt9OtmtpF7biKuXtpD4M8VS2moQrP4c1pvnDjIjlPQD2rU+FGhSafoE+q3SlLnV5PtEit1XrxXUeI9Ct/EWiT2FyoJdSY3PVG7EVwSd3c64qyscZdeFtb8HznUPBM32myPLadK3yKvqtdB4Z8dab4gJt33Wd+h2vbT/KxPt6is74f67cKZ/DWtMRqGnnYjP1mQfxVqeJvBGneIT9oXNnqKD93eRcOtSM6WivNrXxPrvgmdLLxhC11ZE7Yb6Ibjj1f0r0Cx1C01O1W5sLhJ4m6OhyKALNFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHl/iz/kv/hD/AK9Jv616hXl/iz/kv/hD/r0m/rXqFABRRRQAUUUUAFFFFABRRRQAUUUUAFea63DJ4A8YJrtoh/sjUH2XkafwyHo30r0qqeq6Zb6xpk9jdrmOZCpOORnuKALMM0dxCksLh43GVZTwRT68+8DancaFq83g/WGO+ElrGQ/8tIueteg0Acp4j+H+l65Kby23adqX8N5Bw1c/H4k8S+BnW38VWr6hpoO2O7txuk+rV6XTZI0ljaOVQ6MMFSOCKAKGka9puu2wm0y7jnGMsqtkp7Edq0a4LWPhtHFcNqHhG6fSbsfMYojhJj/tVDp/xAvtGvV0zxzYm1nJ+W6iGYiPUmgD0OiobS8tr+2W4sp0nhbo6HINTUAFFFFABRRRQAUUUUAFFFFABRRRQAUUVk674m0vw7bGXUrlEfGViB+d/YCgDWrlvEfj7SfD58kM17eE4FtbfMwPuO1c42p+KvH7NFpEb6NozHBuX+Wb8BXUeG/A2k+HSJ0j+1X5GHvJuXegDmItD8V+OXW48Q3TaRp3VLa2PzSr6NXcaJ4d0vw7a/Z9JtEt1PLFRyx9Sa06KACsTxb4jh8M6FLePh5j8sEXeRvQVsyypDE0krBEUZZj0ArzbS45PiD4ybV7hT/Y2mSbbVG/jkHU/SgC54X8CWt9ok934rtkvb3Usu5lGSiMOF/CovAXwi03wB4n1DU9MmZ47tNqo3/LPnoK9EooACAQQehryTUtPk+HniZ7mJCdD1B8tjpFIepPpXrdVdS0621bTprK+iEsEy7WU1UZOLuhNXVjmEdJYlliYPG4yrDoRS1yd1a6r8Obry5RLqOhSN+7kAy8PsfYV0enanY6vAJtMuY7hcZIQ5K+xrsjNSWhyyi4lmiiirJCiiigApQM59ByTUN5d22n25nv50t4h/FIcCuXF3qvjq8On+Hle10wHE96wwWH+z61MpKK1KUW9hJopfiB4kXSbPJ0ezcNdzD7shB6A161bwR2trFbwqFjiQIoHYAVR0HQrLw7pUdhp0YSNOWPdz3JrSrilJyd2dMVZWIL20ivrKW2nQOkilSDXAfDa7l0bV9U8H3jlmsX82KRv41Y9B9K9Grzn4kWz6Hqum+LrMFRZyBblV/5aKTgZqSj0YgMCCAQeoNcJrnhC80i/bXPBz
+TcL80tnnEcw7/AI12llcpe2MFzGQVljVxj3GanoA5zwt4xs/EcJiYG2v4+JbaXhge+B6V0dcp4o8Fx6vKNR0qU2Oqx8rPHxv9jVfw340ka8OieJ4/sWqRfKGbhJvdT60AdnRRRQAUUUUAFFFFABRRRQAUUVy/ijxpb6Hizsk+2anJ8sdvHyVPq3oKANLxB4k0/wAOWRuL+UbiP3cK/fkPoBXIWWjap49vU1LxKGttJQ5t7EceaPVxV3w/4LuLm+GueMJPteoMd0cBOY7f2FdwBgYHSgCO3t4bS3SC2jWOJBhVUcAV578UbganPp3he1RWvL9w+7GSqA8ivQridLa2lnlOEiQux9gM1514EgfxN4q1HxZeDfDvMenk/wAKdDQBl6h+z74fk1yx1XSibSW2ZWaJfuuR3r1kwq9t5MgDIU2sD3GMVJRQB5HcWknw98TypICdE1F8o3aBs9K6wEFQynKsMgjvXQ6zo9nruly2OoRiSKQd+qnsR715jL/a3w9uPs+qiS/0UtiO5UZdB/tV0U6ltGYzh1R1tFQWN/aapbifTrhLiM90OcVPXSYhRRRQIKKKgvr600y3M+o3EdvGBkFzjP0oGT8BSzEKqjLMegFclHZy/EXxMlvEGXQ9NkDyS/8APSQdAPUUsLar8Qbo2mlJJZaKjYnuGGGk9l9RXp2j6PaaHpkVjYRhIoxj3Pua5qlS+iNoQ6suoixoqIAqqMADtS0UVzmxwvxB0O5jaHxPoo26hp4zJt6vEOorpvDuu23iLQ7fUbU4Eq5ZD1Q+hrTdFkQo4DKwwQe9eaoz/DvxuyOT/YWrPkMf+WUp6Ae1AHo11awXts9vdxLLFIMMjDIIrgL/AMFar4Zu21PwPcsFBy2myH90R7e9eiAggEHIPeigDkfDfxAsNXk+xaip07UUO1oZ/l3t/s56111YHiTwbpfiWPddReVdqP3V1Hw8Z9RXJx674h+H8qW3iOKTU9JztivIhukH+9QB6XRVLStYsdas1udNuEnjI52nO0+hq7QAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHl/iz/AJL/AOEP+vSb+teoV5f4s/5L/wCEP+vSb+teoUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAch4/8Nyapp8epaZmPU7A+bHIn3nUclPxrR8IeJI/E2hR3XCXKfJcRd439DW9XmutQyeAfGCa7aIf7I1B9l3Gn8Mh6N9KAPSqKZDNHcQpNA4eNxlWU8EU+gAqrqOmWerWbWuo26XEL9UccVaooA84u/BOt+GLo33ge8LRg86dO37sD2rS0D4kWV7N9h1yJ9Lv0+VhONqO3+ya7WsfX/C2k+JINmp2qSOo+SXHzIfUGgDXVgyhlIIIyCO9LXmbWPizwC7SafM2taODudJTmVB6KK6nw7440jxGipBKbe6PW1n+WQfhQB0dFFFABRRRQAUUU2SRIYmklYIijLMTgAUAOqtf6lZ6XbG41C5jt4h1eRsCuN1r4jxm6OmeFLZtTvm4WRBmJT7mq9h4A1DW7pdQ8c3rXDk5Ono2YVoAju/HOteJrlrHwNYEqCRLd3I2pj1U960NC+G1pbXAvvEVxJrF9ncGuOREfRa7G0s7ewtUtrOFIYUGFRBgCpqAEVQqgKAAOgFLRRQAUUVheLvEkXhnQpLs4e4b5beLvI/oKAOc8c6rc6zqkPhDRXPnTkG9kX/lnFx0967PSNKttF0uGxs1CxxKFz3Y+prm/AHhuTTbGTVdUzJqeoHzZHf7yKeQn4V2NABRRRQAUUUUAMmhjuIWinQSRuMMrDIIrz3W/hXEs7X3hK7fS7gfN9njOI5D716LRTTsB47LqHjzw8ude0qC6gHRrT5mIqGP4qWRfZN4d1iMjgs0JxX
tFMkijlQpKiup6hhmtFVkjN04njcvxU0+NsJoGrzD+9HCSKkh17xl4gGPDOirAD/FfDbgV6/FbwwJthiRB6KMVJQ6smCpxPM9J+GF3qNwt741v3umPJsUbMQr0WysrbTrRLWxhSCGMYVEGAKnorNtvctKwUUUUhhVPV9Og1bSbiyuYxJHKhGD69quUUAef/C7UZreG98M6k5a902Qn5v7hPFegV5r42ifwv420vxRbDy7SR/L1Fh3XoK9HhlS4gjmiOUkUMp9QRmgB9YviTwvY+JbMR3S7J4+YbhfvRn2raooA890zxHqPhDUI9G8W7pLVztttQHIx/tntXoEciTRLJEwdGGVYHIIqtqel2esWElnqEKzQuMFWHT3rgc6v8N7n5zLqWgM3J6yQ/wD1hQB6TRVXTdStNWsY7uwmWaGQZDKc49qtUAFFFFABQSFBJOAOSahu7u3sbV7i8lWGFBlnc4Arz671fVviBePp3h8yWOkI2Li9PDP7J6igC7r3jK71G/Oh+Doxc3bfLJc/8s4h359a1PC/gy10AG6uHa81KXmW6l5bJ7CtPQvD9h4esBa6fEFH8b/xOfU1p0AFFFNkkWKJ5HOFRSxPoBQBw3xP1i4i0230PS2/4mGpOFVR3jzhq6nw/o9voOhW2nWi7Y4U6e561w3hWNvF3xBv/EV0C1rp7GLT27EHrXpdABRRRQAVFc20N5bvBdRrLE4wyMMgipaKAPN9Z+FrW1y194OvX0+Uci0BxExrFm1jxv4eX/io9HjuIh0ayG4kV7FRVxnKOxLinueLxfFSyeTZN4f1eE93eEhaJPipYq+yLw9q8pPRkhJGa9llginjKTRq6nswzRHDFCm2KNUX0UYq/bSJ9nE8gi1jxx4gXHhzR4rdD1a9G0gVs6P8LGurhb3xhfSahJ1+yE5iU16TRUSnKW5SilsRW1rBZ2yW9rEsUSDCoowAKlooqCgooooAKyfEug2/iPQ57C4UbmXMb90fsRWtRQBw/wAP9enIn8N6yxGo6cdil/vTIP4q7iuF+IGh3MMsPinRBtv7D/W7eskQ6iun8P65beIdFg1C0PyyrlkPVD6GgDTpssSTRNHKgdGGGVhkEU6igDz/AFXwJe6PetqvgW4+yz53NYscRSH1NXPDnxAhvJ/7N8QxHTNTT5WWT5Ukb/ZPeu0rF8ReFNM8TWvl38I85R+6nX78Z9RQBtdelFeaLeeJfh5KE1ESaxomcCcfNMn1HpXc6Lr+na/aCfTLlJhjLKD8yexHagDSooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA8v8Wf8AJf8Awh/16Tf1r1CvL/Fn/Jf/AAh/16Tf1r1CgAooooAKKKKACiiigAooooAKKKKACiiigAqnq2mW+saXPY3a5imQqfUZ7irlFAHn3gbU7jQtXm8H6w3zwktYyH/lpF7+9eg1yHj/AMNyapp8ep6ZmPU7A+bG6fecDnZ+NaPhDxJH4m0KO54S5T5LiLvG/oaAN6iiigAooooAK5XxF8P9K12Q3UG7T9Q6i7t+HrqqKAPNIvEPibwM6W/ia1fUtNB2x3UA3SfVq7rR9f03XrYTaZdRzgDLKrcp7EVfkjSWNo5VDIwwVI4IrhdZ+G0QuDqHhS6k0m7U7vKhOI5T/tUAd5RXlkvxabwbMdO+IcAt7sLuSWAZRx/jT4Na8RfEuLf4fcadoMhwbrOJj9KAOm8R/EDSdAPkRlr69JwLa2+ZwfcVz0WgeKfHDrceI7ptK0/70dtbH5pF9GrqPDngfSfDmJoo/tN7/Hdzcu1dHQBm6L4e0zw/a+RpVpHAp+8VHLH1NaVFFABRRRQAUUUUAMmmjghaWZwkaDLMxwAK840eKT4geMn1m6Rv7H059tpG/wDFIP4vpVnxzq
dxrurQ+D9Gb55sNfSD+CL2967XSdMt9H0uCxtFxFCgUepx3NAFyiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAMvxJo0Gv+HrrT7lN6SISB/tDkfrXNfDDWZ7nRZdH1R/+JlpzlJEPUJnC/pXc15p4hRvB/wASrTXYgVstUPlXr9lx0oA9LopqOskauhyrAEH1FOoAKbLEk0TRyqHRxhlI4Ip1FAHnmpeHdT8HX0mreEyZbJjun08/dUdyorqvDfiew8TWPnWbFJV4lgfh4z7itmuL8SeDJ/t/9t+FZfsmppy8Y4Sf/eoA7Ss3XNfsPD9i1zqEwQY+VM/M59AK4tfizbQwPp93aSjXk+T7GBy7eo9qt6H4NvNUvhrfjNxPdMd0VmDmOEdvxoAp2umar8Q7xb/Ww9loiHMFmOGmHq4r0G0s7ewtUtrOJYoYxhUUcCpgAoAUYA4AFFABRRRQAVxnxM16bSvDn2TT/mvr5xCiDqVPDGuyJCqSxwByTXmmlg+NPindahLk2WhnyoP7shPegDs/Cegw+HPDVrp0HOxdzE9Sx5NbNFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAI6LIhRwGVhgg9681iZ/h545aJiRoerPuDN/yzlPRR7V6XWR4n0CDxHoU9jOo3sMxOeqP2IoA1wQRkcg0VxPw/1+eRJvDusMV1HTj5YL9ZkH8VdtQAUUUUAI6LIhR1DKwwQe9cJrPw9a2um1TwbOdOvQdxgU4jmP8AtV3lFAHDaH8QgLsaX4sg/s3UF4MjDETn/ZNdwrK6BkIKkZBHeszXPDmmeIrTyNUtklwPkcj5kPqDXDlPE3w7kLRmTWdDzk55mj9gPSgD0yisjQPE2meJLQTadOGfGXhJ+eP2IrXoAKKKKACiiigAooooAKKKKACiiuU8VfEbw/4SiP265E04ODbwHc/5UCbS1Z1dFeGar8ddWvn/AOKT0hdnrefLWU/xU+IE7b5LeyhPTah4qHUgupxVMwwlN2lUVz6Ior53T4rfEG2yY7Wxnz1Eh6Vt6T8ebizTb4s0l1k9bRdwoVSL2Y6ePwtV2hUTZ7bRXP8Ahrxroniq2STTLtDKwyYGOHX6iugqzt3CiiigAooooAKKKKACiiuY8Xp4sKwP4RNsXU5kW4OARQB09Fcn4S8Xy6tcTaVrUItdXtR+9Qfdb3WusoAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAPL/Fn/Jf/CH/AF6Tf1r1CvL/ABZ/yX/wh/16Tf1r1CgAooooAKKKKACiiigAooooAKKKKACiiigAooooAK811qGTwB4wTXLRCdJ1B9l3En8Mh/jPtXpVU9W0y31jS57G8XdFMhU+oz3FAFmGaO4hSaBw8bjKsp4Ip9efeBtTudD1abwfrLfPDlrFz/HF7+9eg0AFFFFABRTZZUhiaSVwiKMszHAArhdZ+I6SXR03wlbNqd63CzIMwof9o0AdlqOp2Wk2pudSuY7aEfxyHArgrrxvrfii5ax8EWBVASJLy5GEI9VNSad8Pr7WbtdS8dXrXUpOTYI2YFrvbS0t7G1S3s4UhhQYVEGAKAOM0r4Y6ftabxQ51u5k5P2oblQ+gqne+Cda8N3Z1DwPenYOunTHEQHtXotFAHE6D8SLK7m+w6/E+lXyfKRcDakjf7JrtVYMoZSCCMgjvWTr3hfSfEcGzVLRJXUfu5MfMh9Qa4l9P8WeAWaXTJ31rSAd0kcxzKo9FoA9NornPDnjnSPEQWOKU214etpP8sg/CujoAKKKKACsHxf4kj8M6FJdcNcv8lvF3kf0FbU88VtA807hI0G5mboBXnWjQSePvGDa7dof7I099lpE/wDFIP4xQBs+APDcml6fJqep5k1O/PmyO/3kB52fhXYUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFYHjXw+
niXwpd6exwzDehHUEcit+igDkfhx4gfWvDKQ3nyXtmTDLGeoC8A111eZXGfBXxYW5/5c9fwj/3YmFemgggEHIPSgAooqO4uIbS3ee5kWKJBlnY4AFAEnTrXE+IvGlxNff2J4RiF5qMnytN1jh9cn1qhf67qvjm9fS/C+6201Di4vzxuH+wa67w94asPDdl5FkmZG/1szfekPqaAOSX4VQSWZvLi9lbXj84v/4lb0HtVrQvGN3pt+ND8Yx/Z7pflju/+Wco7c+td1WZrugWHiHT2tdQiDD+Bx95D6igDTBDAEHIPINFec2er6r4AvU03xAZL3SXOLe9HLRj0evQba6gvLZLi1lWWJxlXU5BoAlooo6UAcp8RvER8PeEppIRvuLgiCNB1+bjI+lWfA3h4eHPC1taMd87DfLIerE881yuT41+LDKebHQPldD92Vj0NemAAAAcAUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHCeP9Fuba4h8VaIMX1jjzgOskXcV1Oga3beINFt9QtD8sqglT1Q+hrQdFljZJFDKwwQe4rzaBn+HfjdoHJ/sPVX3Kzf8spT0Ue1AHpdFAIIBByDRQAUUUUAFBAIweRRRQBxHiD4fJLcnU/DE7aZqKncREcJMf9qodH8fXNhfDSfG1v8AYbscC6A/cv8Aj613tZ+saHp+vWRtdUtknj6jcOVPqKAL0ciTRLJEwdGGVYHIIp1eaSaX4j+H0jXGkSyato4O6WCU5kjHotdd4d8X6V4lhBspglxjL20nEifUUAbtFFFABRRRQAVHcXEVrbvPcSLHFGMs7HAAqSvC/ij42k8Q6i3h7RZyLOE/6VKh4f1WplJRV2Y168MPTdSb0RL44+Kl3rMsuk+EHMUA+WW96H/gNcBDp0STG4uXa7uj96eU5Y1Yhhjt4VigUKi9AKfXDOo5n5/js0rYuVr2j2/zFyaSiisjyQozxggEfSiigCmbAw3H2rSriTT7vr5sJwTXqXgP4sN5kWjeLiIrjhY7r+F/qfWvOahurWK9gMUwyOoPcGtoVXH0PZwGbVsLJRk7x7f5H1QjrIiuhDKwyCO4pa8i+E3juQyjwxrs3+kJxbSuf9YPSvXa7k01dH39KrCrBVIO6YUUUUzQKKKKACiiigDzHxq39n/GDwhJb/I19K0cpH8QHrXp1eX/ABD/AOSteAv+vl69QoAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAPL/Fn/Jf/CH/AF6Tf1r1CvL/ABZ/yX/wh/16Tf1r1CgAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAOP8f8AhuTVNPj1LTMx6nYHzYnT7zgc7PxrS8IeJI/E2hR3PC3MfyXMXeN/St6vJvFurWXwq8WDXTMP7P1A4uLOM/OXP8QFAHrNcp4j+IOlaC5t4d2oX2dv2a2+ZgfeucGo+KPiIAdJB0jQpQMztxMw9RXWeHPBGk+G1WSCL7RefxXc3MjfU0ActF4d8UeN5FuPE122maf96K2tThnX0eu60bQNM0C18jSrSO3U/e2jlj6mtGigAooooAKKKKACiiigDlvEfgDStedrmIGw1A8i8t+HrnI9f8T+BZEt/Edq+qaYPljubcbpAPVq9MpskaSxskihkYYIPQigDP0bxBpmv23naXdxzgD5lU8p7EVpVwmtfDWA3H2/wrcvpF2p3eXCcRyt/tVg6r8U9U8FafLa+M7HZfEbbe4iH7pz2yaANjxvqVxr+sweENHY5lw19IP4Y/T6122l6bb6RpkFjZrtihQKPf3rlvhvo8MGjHWHuFu7zUT5skwOdoP8I+ldpQAUUUUAFFFFABRRRQAUUUUAFFNkkSKNpJWCIoyzE8AVwWv/ABTsrWc2Ph2B9Vu2+UPCN0aN/tGi1wO/pGZUUs5CgdSa8gl/4WF4iTbqlzBptuehtThsVXT4a3Jffc
eMtXlB+9Gz8VqqUmQ6kUezJIkq7o2DL6g5p1eMSfDSUnNv4v1a3H91HOKlhsPHnh0E6HqSamo4xetnIodKSEqkT2KivN9F+KRhuV0/xhZPp9x0a5A/cn8a9DtrqC9t0ntJVmicZV0OQazatuaXuS0UUUgCiiigAooooA5j4geHj4h8KzwwER3MP72KQdQV5wKd4B8QjxJ4Tt7lhslizDIp65XjNdIQGUhhkEYIrx/Udfi+F/xAuYPLe5h1rH2W0h5KP649KAPUtY1qx0Kwa71KdYo1HGTyx9BXDQ2ur/Ea6FzfiTT9BRv3UHR5v972q1pHhC+16/XWvGjiVid0NiD+7Qdsj1rvVVUQKgAVRgAdhQBBYWFrplklpYQrDBGMKijirFFFABRRRQBBe2VvqFo9teRLLDIMMjDg15/cWOq/Dq7a80vzb/Q5GzNbdXh/3favR6RlDqVYAqRgg96AKGja3Y69YLd6dMsiEfMAeVPoazfHPiBPDfhS5vD99/3MY77m4FYmseEb7QtQbW/Br+W4O6axP+rkHfA9a5uDX0+J3jmxsPKeC10wFr20m4JcdDigDtvh34fbQ/DERu/mvbn97PIerZ5Ga6ukVQihVGABgCloAKKKKACiiigAoopk00dvC807rHGgyzMcACgB9Feea78U4FuW0/wvavqV0eFnQZiU+5rBmt/H/iJcazfRaZEfu/Y2wwFXGEpbEuSW56+7pGhaRgqjuTSo6yKGRgwPcGvGI/hrcb91z4w1acHrGz8UP8NJ92638Y6tCByEV+Kr2MifaRPaKK8eit/iB4eUnRr6HUox94XjZOK3dD+KkDXK2Hii1k0y56GZxiJj7GocJR3KUk9j0SimQTxXMCTQSLJG4yrKcgin1JQUUUUAFFFFABRRRQAVkeJ9Ag8R6FPYzKN7DMTnrG/YiteigDifh9r80sEvh/V2K6jpx8v5/vSoP4q7auD8faLc2d1D4r0UYvLL/j4Uf8tIu4rq9C1q21/R4NQtD8kqgle6n0NAGjRRRQAUUUUAFFFFABXHeIvh9Z6lOdQ0eRtM1NTu86DjzD6N7V2NFAHnmm+OtQ0K+Gk+OLUwSAgJfRj90R7n1rstQ1yx0/QJtYlnRrOKMyeYp4I9qn1LTLLV7NrXUrdLiFuqOM18nfGvWz4d1CTwp4f1aaawYbpoi2RGT/AKAPY/hb8abbx1rN9pd4qQXEbk2/YSJnj8a9Yr869E1m88P6xb6lp0hjngcMpB6+1fcfw08eWfjzwpBfQuBdIoWePPIYdTQBF8U/Fo8K+E38liLy9Jht8dmPevCNOtTa2p38zTHzJT6setdZ8YtTfVfiNbaNndBZxCfPYNXPMcsTXHXld8p8ZxDiXKpGgtlqxKKKK5j5cKKKKACiiigAooooAp36zxGHULA7b2zYPCw7etfR3gfxLB4r8KWuowNkldkmf74HNfPvUEeoxXbfAjVWttS1Xw43EcH75Pck811UJfZPreHsS7yw79V+p7bRRRXWfXBRRRQAUUUUAeX/EP/krXgL/r5evUK8v+If8AyVrwF/18vXqFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQB5f4s/5L/4Q/69Jv616hXl/iz/AJL/AOEP+vSb+teoUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFNlljgiaWZwiIMszHAAoAdVTUtUsdItDc6lcx20I/jkOBXG6x8RxNdnTPCFq2p3jcCdBmJD7motO+H15q92upeOLxruYnJskP7kfhQBDdeNtd8U3DWXgexMaAkPe3S4Qr6qauaf8LNOe0uD4jmk1a6uUw0k5z5RP9yu3tbSCytkt7SJYYUGFRBgCpaAPP/BGq3OiavN4P1lvngy1i5/ji9/evQK4/wAf+G5NU0+PU9MzHqdgfNidPvOB/B9K0vCHiSPxLoSXPC3MfyXMXeN+4oA3qKKKACiiigAooooAKKKKACiiig
CO4uIrW3ee4cRxRjczN0ArzbS7AfErxBPqmtW4k0S1YxW1vKOHYfx1a8a6jceI9bg8IaO338PfyDoIvT613Omadb6TpsFjaLtigQIv4UAcJfeB9Y8O3jaj4HvSM9bCdv3QHsKvaD8SLS5mFh4ihfSb5flPnjakjf7Jrt6ydd8M6V4ig8vU7VJXAwkmPmQ+oNAGqrK6hkIKkZBHelrzN9M8V+AWaXSJ31jSAd0kUx3SqPRa6jw5450nxEFijkNrenraT8SD8KAOkooooAKKKKACq99fW+m2Mt3eyrFDEu5mY9KsE4GTXk+talL8QPEr6dayFdEsX/fMvSZx1U04pydkJuyuNvdT1X4iXRW2aXTtBjbG4cPN/wDWrd0rRdO0ODytLtUhB+8wHLn1NW4oo4IUhgUJFGNqqOwp1dsYKK0OaUnIM0UUVZAUUUUAV9Q0+z1W1NvqVulxF/dcdK5qJdX+H919r0lpL7Rif31qeWiH+yK62l7EEZBGCPUVMoqS1KjJx2Og0bWbPXtLiv8ATpRJFIO3VT3B96v15ELiX4e+JVvbckaHfPi4i/hhJPUV61BMlxbxzRNuSRQykdwa4pRcXZnUndXH0UUVIwopksscETSzOERBlmY4AFef6l4j1PxjfyaR4QzHZodtzqB4BHoh9aANHxL40kjvBovhiMXuqyfKWXlIPdq5rXfh1dw+HZdZkuXu/EEWJVlY5EeDkhfwrvPDXhax8M2hS1UyXEnM1w/LyH3NbMiLJGyOMqwII9jQBjeENei8R+GLW/hOSV2P/vDg1t15p4XdvCHxDvvD0x8vT70+ZYIe7dWr0ugAooooAKKKKACiiigDH8V65F4e8N3V/M23apVD/tkcfrXnug/D69u9CTxAtw9p4iuMyu4OA/OQD+FX/F0jeLfHun+Goj5mn2582/A/hYcrXpMcaxRJGgwqKFA9hQByHhnxo1zdnRvEcX2LVYvl+bhJvda7GsLxN4UsvEtqFnzDcx8w3MfDofrXOaT4n1DwvqEei+MAfKY7be/H3COwY+tAHoFFIjrIiujBlYZBHcUuRnGaACiikd1jRnchVUZJPYUAVNV1S00bTZb2/lEcMYyST1PoPevL7m61f4izl52k03QQ3yIOJJR7+1O1C9k+IXieSEMV0TTnwfSZgetdUqqiKiKFRRgAV0U6d9WYznbRFTTNKsNGthBplslunfaOtW6KK6TEKKKKBBVXUtLsdYtzBqdslwnbcPu+9WqKBnKWs+r/AA8ufNt3k1DQnb95G3Lw/T2r1DS9UtdY06K9sZRJDIMgjtXNEK6Mkih0cYZT0Irk7O/l+HXiZAWZtB1GTbt/54yHoB7VzVKdtUbQnfRnrlFIrBlDKcgjIIpa5zYKKKKACiiigAooooAbJGksbJIoZGGCD0Irza1d/h540a0kY/2Jqj7o2b/lnKf4R7V6XWP4n8PweJNDmsZgBIRmGQ9Y37EUAbAORkciiuK+H+vzzRTaBrBKalp58v5/vSoP4q7WgAooooAKKKrXeo2dgu68uY4R/ttigCzRXNXXxD8KWblJdctPMH/LNXyxrCvPi/pkGfsWl39/jp9njzmgDu72TyrGaQ5+VCeOtfGvid/h5qOu37yzammoPOwdm+5nNfQn/CwfEWtoU0bw3c2wkBA+1pjH1r5A8VxXMPizUlvkCXHnsXVegOaANabwbbTu0lhrFoIcZUSvhq3fhp4q1H4ceMopYn+12Ex2XCxHK49R71w+ieHtU8RXottItJLh8/MUXIUeprqrzSrbwQUtTeLe6w5H7uE5jTPY+9AHoWq6rFr3jq71S3JMcqfLnqB6U6sjTLSSw1ARzDEk0AlYehNa9efV+Nn5znF/rs7hRRRWR5QUUUUAFFFFABRRRQADrWx8MbgWvxVIJx9oQL9axx1rV+G9sbr4qow5+zqGNbUfjPbyK/11W7M+jqKKK7z9ACiiigAooooA8v8AiH/yVrwF/wBfL16hXl/xD/5K14
C/6+Xr1CgAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA8v8Wf8AJf8Awh/16Tf1r1CvL/Fn/Jf/AAh/16Tf1r1CgAooooAKKKKACiiigAorzH4jeOfEujeMNL8PeFIbRri8haZnuegArD/4Sb4tf3NF/KuHEZjhMNPkrVFF9mbQoVaivCNz2qivFh4m+LZIATRefauo+FXjTWPFkWr2/iKGCO80258ljB91qeHx+FxUnGhUUmuwp0alNXmrHoNFY3iDxXpXhuAtqNyqykZjgB+eT2ArjDd+LPH7FbKNtF0Zzhmk+Wcj1FdpkdB4j+IWl6G5trYNqN/nb9ltvmYH3rAi8N+JvG0i3Hii7OnWH3ora2OGYej11XhzwVpPhtFe3i8+7/iu5eZG+proaAM/R9C03QbXyNKtI7dD97YOWPqa0KKKACiiigArzXWoZPAPjGPXLRCdJ1B9l1En8Mh/jNelVT1bS7fWdLnsLxd0UyFT6jPcUAWYZo7iFJoHDxuMqy9CKfXn3gbVLnQ9Wn8H6y3zw5ayc/xxe/vXoNABRRRQAUUUUAFFFFABXP8AjLxKnhnQ2nTDXcx8u2jP8b9hW5c3EVpbSXFzII4o1LOzdAK868O28vjrxZJ4j1CM/wBm2bGOzhfozD/loKANzwD4afR9Me+1DL6lft50zP1TP8I9q62iigAooooAK5fxH4B0nXy06q1jfE5F3b/K9dRRQB5nHrninwLIlv4gtm1XSx8sdxbjMij1eupbx1oR8OXOsQ3sckNtGXkUNyp9DXQyRpLGySKGRhgg9CK+RPjz4i0iLxBJo/hRvs8a8XawHCO3cGgD1j4YfHa28Ya7c6Tq4jtpjIfsrdA654H1r2Wvzhtbqeyuo7m0laKaJtyOpwQa+vfgn8YIfGOmppGsyLHq1uoUEn/XDtj3oA7P4keIpNB8Lutm2L27PlW/+9WL4U0aPRPD0MKjEs/76Y/7Z61m+PZv7e+JelaPyF00i6Yf3s11jkFzgYHYV1UY6XMKr6DaKKK3MQooooAKKKKACiiigCpq2mwaxpFxYXS7o5Fzj3HSofhVrk89jdaBqLFr3TWwf9zotaQ4IrkLGb/hH/jNG6cDWwIyPpWNaN1c1pvWx69VPVNVstGsXu9RnSGJB1Y9T6Cs7xN4rsfDNoHuCZbmTiG2j5eQ+wrm9L8L6j4qvo9Z8YkiIENBYD7oHbcPWuQ6Cuq6z8R7rdIJNN0BW4H3ZJv/AKxrvtM0yz0ixSz0+BYYUHCqMfjVmONIo1jjUKijCqOgFOoAKKKKAOE+KOkzPpEGu6YhbUtMcPER2Un5q6nw/q0Ot6Da31u4dZIxuI/vY5/Wr08K3FvJDIMrIpUj2IrzrwFM3hvxZqnhO4Oy2RvMsi38eeTigD0miiigAooooAKz9d1SLR9Eur6ZwgijJUnu2OB+daFebfEG4fxD4n0rwhbkmC4bzrp0/g2nIBoAufC7TJW0658RaihXUNVctID2UHiu9qO2gS1to4IlCpGoUAe1SUAFUtW0iz1vT3s9RhWWJx3H3T6j3q7SMwRGY8ADJoA8r1HXbr4QRNJrE732gsT5JJzKp7CvFLn4+6zefE221mORotMik2C1zwUJ6n3rY+NNtr3i3XJvsms215awnEenwP8AOPqPWvENQ0m/0mURalaSWznoJBigD9CdG1a11zR7fUbCQSQXCBlYGuV+KfiGfSPDX2TTyDeXziFR3CngmvHP2b/iT9muj4T1WY+XJzasx4XHavQPFD/258YtOgJ/0fT4iHTsxPeqiruwm7K5teH9Hh0HQrexh52ruZj1JPNaNKetJXecgUUUUCCiiigAooooAKztf0mPW9CuLJ13OVJhP91+xrRpVbawYdqBlX4V6/Jqnh6XTrty93pT/Z5Wbqx55rua8j8NyHw/8aJNJi4h1S3a5cjpur1yuCSs7HWndXCiikZlRSzEADuakYtFY974u0DTiwvdWt
odvXc/SsO7+KvhqIH7Fd/bz2FvzmgDtKK8zf4uzXEpg0zwrqsjnpK0fyUh1v4lauN2jadZ2i/9PYxQB6bUc9xFbRmSeRY0HUscV5t/wi3j7Vhu1rWIbdm+8LU4A+lPg+DkPm+ZfeJNVuieWR5floAo/ELW9D0rULfxRpus2sd9akLNHv5lj7ge9ayfGPQrjTYLuwguLzzV3FIVyV9jWpa/DHwtBzcaZFdnsZxuxXL6fplj8N/HD2ps410fVXzFKV/1cp/hHtQBYHxV1PUcrpPhXUY8cbriPANKbv4rajhrODTLWBv+ev3gK9NGMDHTtiigDzL/AIQLxVq/GteIZbYHr9kfFWbP4P2EJzfa1qWoe1xJmvRKKAOWtfht4TtgrHRbaaUf8tZFyxretNKsLAAWdpFDjpsXGKt0UAFfOmtfAV9c8e6hruv30dlohcu7Zw1fRdch8R/Ax8d+Hjp66hPZMMkeW2A/s3tQB85+Nfibo/h2yk8NfDS1S2iQeXNqAHzyfQ1594CspNe+I2lW85aU3F0PMZuc+pNXfGHwv1zwnr0umFPt7xp5jNbjdtX1Ndd+zbog1P4iyTSpxZxeZkjoc0Adt8TdJ/4R/wCJEEsSbbKa1WNP96scjBxXr/xk8Kvr/hVb61BNzpjeeir1fHavGbO6+2WiTYw+PnX+6fSuKvG0rnxPEGHcayrLZ6fMnooornPmgooooAKKKKACiiigAHr6c12HwN06S78VavrbL+4ZBHGfcda4TUbl7e2CwIZLiY7I0HVs8V9AfDPwqPCXgu2smJaWX99IT1y3OK6sPHXmPquHsO3OVd7bL9TrqKKK6z7EKKKKACiiigDy/wCIf/JWvAX/AF8vXqFeX/EP/krXgL/r5evUKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDy/xZ/wAl/wDCH/XpN/WvUK8v8Wf8l/8ACH/XpN/WvUKACiiigAooooAKKKKAPGviD/yXXw5/14yfzNbR61i/EEZ+O3hwD/nxk/ma3Chz2/OvzDiv/f1/hX6n0WWfwfmCffH1rx3TPiHrXhPxR4n0bw3pcl/eXl5v+QZKjHP869jRDvHTr61x/wAOPDaa5qPjKWCQ22ow6gPIuE6g4JAPtmujhD/ean+H9TPNP4cfUr6H4hs7CX7brXgvxBqeoMdxa4j3CM/7NdePjNIAAPBOugDoBF/9aug8NeLbn7cdD8UKLfVI+FkxhJx6iuxr9HPBPL/+Fzyf9CTr3/fr/wCtR/wueT/oSde/79f/AFq9QooA8v8A+Fzyf9CTr3/fr/61H/C55P8AoSde/wC/X/1q9QooA8v/AOFzyf8AQk69/wB+v/rUf8Lnk/6EnXv+/X/1q9QooA8v/wCFzyf9CTr3/fr/AOtR/wALnk/6EnXv+/X/ANavUKKAPCPGHjyfX0trrTvBeuQanaSB4pvKxkD+En0rY0j48jUbP5fCerz3EPyXIhjyEfuK9frzfWlm8BeLk1m0UnR9RfZdRIPuOf4zQBX/AOFzyf8AQk69/wB+v/rUf8Lnk/6EnXv+/X/1q9NhmjuIEmgcPG4yrL0Ip9AHl/8AwueT/oSde/79f/Wo/wCFzyf9CTr3/fr/AOtXqFFAHl//AAueT/oSde/79f8A1qP+Fzyf9CTr3/fr/wCtXqFc94z8Sr4a0RpYsNezny7WM873PSgDyPxb8XrrxQsei6R4Y1bG/N/GU+by+4FdJp/xYj0vT4bKy8Da6kECBEAi7Cuv8CeHZNI0przUSZNSvm864ZhypP8ACPauqoA8v/4XPJ/0JOvf9+v/AK1H/C55P+hJ17/v1/8AWr1CigDy/wD4XPJ/0JOvf9+v/rUf8Lnk/wChJ17/AL9f/Wr1CigDy/8A4XPJ/wBCTr3/AH6/+tR/wueT/oSde/79f/Wr1CigDy1/jJJJGyHwVrwDDGRFXyd44aGTxfezW9rcWqyyFzFc/fBPrX6B18
a/FHwlrWtfEy/mis3S2Z+blhhFHqaAPJq0NDvNQ07WILzR2dbqFgylOtdDc6BoHh7Kazfi/mYZT7G2Qv1qHSWvfFWt2Oi6LapA8koRZI1+bb6k0Ae2fDXxTceOfiXfalfoElgsVjIH94CvXK840Dw5B4L+LMtlb8CXT0Eh/vPjk16QeDXZS+A5qnxCUUUVqZhRRRQAUUUUAFFFFABXlvxj16fwnr/hfXLKLzbiCRgqf3s16lXn/jrSofEnxB8K6Vc58oOxbHY9qzq/Ay4fEU/D/jQwXh1rXfBuuX2pynerGPckQ6jZXWf8Lnk/6EnXv+/X/wBatfwhr13p2rTeFvET/wClQnNrM3AmTsB7gV3NcR1Hl/8AwueT/oSde/79f/Wo/wCFzyf9CTr3/fr/AOtXqFFAHl//AAueT/oSde/79f8A1qP+Fzyf9CTr3/fr/wCtXqFFAHl//C55P+hJ17/v1/8AWrj/ABj8RLi71TTdds/CGsW0+nPlpJI8AqeoNfQFQXtql7Yz20ihllQqQR6igDzC0+OIvLVJ7fwdrcqMPvpFkE96m/4XPJ/0JOvf9+v/AK1XPhtey6Tf6j4Rv3Jk0+QtDI3/AC0Vjnj6V6JQB5f/AMLnk/6EnXv+/X/1qP8Ahc8n/Qk69/36/wDrV6hRQB5VdfHD7JbPNP4O1uJFGdzxYA+tcn4P+Is0Gral4gvPCOsXU+ovlHjjyqKOwrvviTfzalqGm+EbFysmouGldOqKp6H613ljaR2FhDawqFSJAoAHoKAPNv8Ahc8n/Qk69/36/wDrUf8AC55P+hJ17/v1/wDWr1CigDy//hc8n/Qk69/36/8ArVHP8ZpGt5FPgrXQChHMXtXqlRXWfsk2OuxsflQB8Aa9rNynjC+1Cw8+wkklLbc4ZfrUlv411BQ39opHqLHo1yNxFdRq3w38UeK/iBqC2GmTCJ5z+/dfk/Ou90r9nbSfD2mtqXxE1hIIIxuKxNgE+lAHkGmTwXWpw3ej29wuqJIJAsP3Vwf5V7x8MNXu/EviS/1HVABdw4RsV5p4o8e6TLNH4e8BabHZ2rSCL7Zt/evk4616V8N9Hl8LeIP7MuWYzTIHYt1bIzWlL40RP4T1M9aKU9aSu05QooooAKKKKACiiigAooooA8x8f+LbfwV8T9J1S5tpZv8AR9gMYrsI/GPjXXoUk8P6RHAkgyj3I4IrG1ewtdb+M+maTfQrNE1kZCrDOPet+ex8Q+ALlrnSnk1XRCcyWzcyR/7vtXFU+NnVD4URDR/ibqny6vqFhbR+lrwRSr8I57xxNqvijVDJ3jilwtdp4e8T6b4ls/O0+YeYv+thb78Z9CK2KzLOLs/hX4cgUC8ga/I6m4Od31rbs/B/h6wx9i0e0hx02RgVs0UANSNIkCxqFUdABinUUUAFFFFABWN4p8PQeJdCmsZQBJjdDJ3jfsRWzRQBxXw/8QzXME2hauSmpacfLIf70qD+Ou1rgvH2j3Gn3cPi3RVxd2ePtKjrJF3FdboesW2vaPb6haH5JkDbe6n0NAGhRRRQAUUUUAFYXi7xJF4Z0N7psNcSfJbxd5H9BW1PPFbQPNO4jjQZZmPAFecaLDL4+8YSa5eIRpGnvstIn/icfxigDY8DeFms9Mnv9aQT3+pHzJTKMlVP8H0rY0nwlouh6jNe6TYxWks4w/lrgGtqigBGVXQq4DKRgg96+ePiB4Mm8E66+oWStJo96+5+5ic9c+1fRFVdS0211bT5bK/iWWGVcMrCplFSVmc2Jw0MTSdKpsz5lBVlDIQysMgjvS1ueMfh/qngq6a50qKS/wBHkb7ijLxf/WrnLW9t71SbaQMR95e6n0rgnBwep+eY3L62Dlaauu/QnooxRWZ54UUUYzQAU2SVIImllYKijJJqC6v4LRhG7b5m4SJeSxrtPAnw0vvEtxHq3ieJrbT0OYbRuGc/7XtWkKbmz08DltbGS0Vo9yX4W+B59c1WPxNrMRjtIG/0OFhyx9
T7V7mBgYFRwQRWtukFugjjjG1VUcAVJXfGKirI/Q6FCFCmqdNaIKKKKo2CiiigAooooA8v+If/ACVrwF/18vXqFeX/ABD/AOSteAv+vl69QoAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAPL/Fn/Jf/CH/AF6Tf1r1CvL/ABZ/yX/wh/16Tf1r1CgAooooAKKKKACiiigDifG3wv0rxxqlpqN7d3dpdWiGOOS2facGue/4UHpn/Qx61/3/AK9XoqJU4Sd5RT+RSlJbM8o/4UHpg6eI9a/7/wBdb4F+H+m+ArS7h0ye4nN3J5ssk7bmZq6qiiNOEfhSXohOTe7MPxN4XtPElntlzDcx8w3CcMh+vpWFoPie70bUBoHiw+XKPlt7s/dlHYZ9a7msvX/D9j4i09ra+jBP/LOQfeQ+oqxGpRXA6Pr194T1JNB8VEvbscWmoH7pHZWPrXeqwZQykEEZBHegBaKKKACiiigAooooAKp6tpdvrOlz2F4u6KZCp9R7irlFAHn3gbVLnQ9Xn8Iay3zwZayc/wAUXYfWvQa4/wAf+G5NU0+PU9MBTU9PbzYmT70gH8P0rS8IeJI/E2hJc8Lcx/Jcxj/lm/cUAb1FFFAEV1cw2drJc3MgjiiUs7noAK878NW0vjfxXJ4l1FD9gtGMdlC/3WI/5aCpfGV/P4o8QQeEdHY+XxJfSj7vl91+td3p1hBpenQ2VomyGFAiD2oAs0UUUAFFFFABRRRQAUUUUAFfPf7QnhDxBHBJr2i3sq6cFxc26Nj8a961LUrXSbCS8v5VihjGSSa4a1ttR+Id+LvUla18Pxk+TbHhpz6t7UAfD9fUH7Nnw8NlZP4p1KHEs3yQK45UD+Kl8Yfs5wXvjeyvtDKw6ZLKPtUPdR7V7zp2nwaXp0FlaIEhgQIoHoBQB5r8VbV9N1/Q9dtlwBP5d0/+xxXRrKs8azR8pINy/StjxVoEPibw5daZMdvnJhXHVT61wHgfVZriwl0fUFMd7pzGLY3UoOjV0UZdDGoup09FFFdJgFFFFABRRRQAUUUUAKODk9Bya5TwfF/b3xZ1W+kG+2sEX7O3+13q/wCLtbOh6E7248y9n/dwQjq+eDiui+H3hkeG/DEUcvzXNx++lY9ctzj8K560tLG1Na3Dxv4YfXLFLzTmEWqWR8y2l9/SpfBfihPEWl7J1MOoW37u4gb7ykcZ+hrpK8+8X6VdeHNaTxbocZYKQL+BOsqetcxueg0VS0jVbXWtLhvrGQSRSrnI7HuKu0AFFFFABRRRQB5x8RbWTQ9d0vxhaqfLs38u5RP4wxwCa9BtLmO8s4rmFgySoGBHuKr6zpkWr6Pc2U6hllQgA+uOK474X6lLBbXfhjUmJvtLchie6k8UAd/UV1cJaWc1xIQFiQuSfYZqWuB+KGpzyWtn4b01yt9qbjbj+4DzQBV+Hdu/iDXdS8XXY3pcvstA3/LMDg4r0iqGi6XBo2jW1jaoESJAMD171foAKKKKACgjIwehoooA5vxl4iXwX4YuNTt9OkuigJ8uBe/qa+MPHnxK13x5qLy6nO0dvn5LZDhQPpX3ZetbpYzNeBTAqEyb+mO9fM3ib4Pt8QbzU9e8IQx2dqjHyI8YE2OuKAPM/hB4d/4SX4ladZsMojeax7DbzX0r8RIDofjzQtfUbLZU8iX0JPArj/2b/Al9o2p6rf63ZPbXMDeVGJBgkd8V7P428OR+J/DFxZOP3qjzIT6OORTTs7iaurFA8gN/eAb86Sue8F6xJqWlPZX3Go2B8u4U9uwroa707q5yNWdgooopiCiiigAooooAKVdobLnCjqTSVz3jTV207RTaWuWvL8+RGq9Vz/FSbsrjSvoQfDqH+3/H+r67MCfsDm1iY9wc9K9Y6jBrn/BPh0eGvC9rZOAbjbuncdXY+tdBXA3d3OtKyscV4g8ArNef2t4ZmOnaknzbUOI5T/tCmaB4+Zbv+yvF0H9nag
p2iVhiOY/7NdxWVr/hvTfEdkbfUoFc4wkoHzRn1BpDNUEEAg5B70V5ql14h+Hcyx3wfVdBBwki8yxD1b2rvNJ1mw1yxW70y4SeI9Sp6H0NAF6iiigAooooAKKKKAGyRpLG0cqhkYYKnoRXm2nu/wAPfGr6dOx/sXVJN8Lt/BKf4R7V6XWL4r8Ow+JdClspAFlxugkPWN+xFAG1RXGfD/xFNeW0uiasSmp6cfLYP96VR/HXZ0AFFFc/4y8Sp4a0Rpkw13MfLto/779hQBzvjfU7jX9Zh8H6O3MuHvpB/DF6D3rt9L0230jTILGzXbFCgVfU47mud8B+Gn0jTXv9RzJqd8fNmd/vLn+H6V1tABRRRQAUUUUANkjSWNkkUMjDBUjgivPPFXwb0PXpjdafu0256/uPlVj7ivRaKNyZRjJWkro+db/4W+ONI3GGW2vbfPyhB8341gzWHiu1J87w1eShTg+WnWvqiisnRg+h5dTJ8FUd3C3ofLUGk+LrxlWHw9cw7+hkXp9a6HT/AIR+MtWZW1K7t7S1PVV4evoSihUoLoVSynB03dQv6nC+E/hNoHhn968Z1C56+bc/MVPtXdAAAADAFFFanppKKsgooooGFFFFABRRRQAUUUUAeX/EP/krXgL/AK+Xr1CvL/iH/wAla8Bf9fL16hQAUUUUAFFFFABRRRQAUUUUAFFFFABXFal8UNI0fV4tM1GGaC5mYLGjfxZrta82+M3gzT9e8Lvqsji21DTv3tvcDg5HagDp9b8Y2eh3dla3EEsk17/qlTvW/DJ50KyFSm4Z2nqK8f8Agzqp8Zo+o6+6y6lY4jjjbqgHGcV7HQAUUUUAFFFFABRRWJr/AIx0LwyAus6jDbSsMxxufmf2FAHE+KyD+0B4RAOStpNn2616jXnHhOwuvEvjO48W6jbNBAg2WCv97ae9ej0AFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAUNZ0az13TZLLUIg8bjg91PqPeuM0/VtR8Dagmk+IGafSnbbaXnXYOwY16FVTU9MtdXsJLS+iWWJxjBHQ+o96ALMciTRrJEwdGGQwPBFOrzqG41D4dXwt71pLzQZW+SY8tB9favQLW6gvbZLi1lWWGQZR1PBFAEtFFFABRRRQAUUUUAFea6zDJ4B8Yx63aITpOoP5d1EnRHP8Zr0qqeraXbazpc9heLuimQqfUe4oAswzR3MCTQOJI3GVZehFc/418TDw3o26H5r25PlWqernpXOeENcfwtd3nhnxHMI1s1MlrM/Qw9s+9J4ZtJvGniuXxNqcZ+w2xMdlC4+VsfxigDd8B+GW0LSWub7L6let51w7dVJ/hHtXVUUUAFFFFABRRRQAUUUUAFZ2t65ZaBp7XeoShFHCr3c+gqLxF4jsvDmnm4vG3SNxFAv3pW9BXMaJ4dvvEuoLr3ixcL1trE/djHYketAEWn6PqHjjUU1bxCHg0yNs21l03+7D0r0CONIYljiUIijCqBgAUqqEUKoAAGAB2paACiiigArz/wAd+GLmG7XxN4eQ/bIObiJP+WyelegUU07O6E1c8+0TXLXXrET2rgSrxLEeqN3FaNUfE/gKUXja14TcWl+vzND0jl+o9awrDxzHHP8AY/FNrJpN0Dt3zDCyH2rrhUUtznlBrY6uimwSxXUIltpUkjPRgafg1qZiUUuDSOVijMkjqqL1JI4oAKrajqVppFk11fzLFGo4z3PpWFqnjmxtpDbaNE+rXp48m3GSp96u6B4Hv9cvI9Y8aMHxhobJfuqP9oVnOoomkYNkfg7QrrxPrI8Ua5E0dvE3+gW79R/tEe9em01EWONUjUKqjAA6AU6uNtt3Z0JW0CmyRrLG0cihkYYIPcU6ikM81Qy/DnxWEfJ0DUX+U/w2z98/WvSUdZI1dGDKwyCO4qjrej22u6TNYXqBo5B+R7GuQ8GaxdaNqknhPX3PnwnNpO3SZewH0oA76iiigAooooAK808Zxt4S8aWHiqH5LK
VvL1Ajvnha9LrI8UaHB4i8O3Wn3Kblddyj/aHI/WgDS+0x/YvtWf3Xl+Zn2xmvOvBkTeK/G+o+J7keZZwt5ensf4ezVzreM7//AIQo+FBIT4jWTyTH38vOP5V6v4Z0WDw/4etdPtk2KiAsP9o8n9aANWiiigAooooAKKK4/wAc+JZ7KOPRdF/earffKijnYp4JoAyvEl9ceNPEH/CMaTIyWULA386dsfw59672wsLfTLCKzs4lihiXaqqMCsrwl4ag8M6Mlsn7y4f555m+87H1NbtACBFViyqAT1IHWloooA858b+HbnStTXxR4fj/AHq/8fcCD/WjuxqzpGsWeuWIurCUOOjr3Q+ld4yh1KsAVIwQe4rz3xJ4DurK8bWvBjrbXQ5ktT/q5B349a2p1OXR7Gc4c2qNOiuX03xzatN9j8QQSaTeDjE4wHPqK6eJ0niEsMiujdCCOa6k09jnaa3FopcGjBpiEopJXSGIyTOqIOpJFcvqPjq0WX7L4ehfV7wnaVgGfLPvSbS3Gk3sburavaaJYPd30gUAfIndz6Cqngrw5c6zqh8VeIYyrkYs7dv4E9SPWn+G/At5qF+mteM3Weccw2g+5GPcetehqoRQqgAAYAHauWpU5tFsdEIW1YtFFFYmgUUUUANliSaJopUDo4wysMgiuC1fwPe6PfnWPBE4tphzJZsf3Tjvgetd/RQByXhnx5a6xOdP1OF9O1NODBNxv919q62uf8TeDtO8TQ5nDQXS/cuoeHX8a5iz8Sa14Jul07xbGbnTxxDqEY4Rf9s+tAHo9FQWV7bajaJc2MyTwyDKuhyDU9ABRRRQAUUUUAcD490i50y+h8XaIuLm1wLpR/y0i7j611+i6vb65pEGoWhzHMgbb3U+hq7JGk0bRyqHRhgqRwRXm2nPJ8PvGz6ZMx/sXVH3QO38Ep/hHtQB6Nc3MNpbSXFzII4o13O7dAK878P20vjrxVJ4h1CMjTbN/Ls4X6MR/GKm8ZahceJ9eh8IaO2EOJL6UH5fL7rn1rudN0+30rTYLK0XbDAgRR7CgC1RRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAeX/ABD/AOSteAv+vl69Qry/4h/8la8Bf9fL16hQAUUUUAFFFcF4h+K9h4b1+LSr/TLvzZmCxuB8r/SgDvaKo3Op/Z9FbUPs8jhY/M8pfvYxmsvwh4wh8YWUl3a2U9tCjFQ0w+8R1xQB0VFFFABRRRQAV55eT6j4r8bRabeaTcQaRa/NI8g+SY16HRQB4d420PXfBvxMsfEHgXSZZ7ecBbyCFfkI+lbnii8vb7xf4XdJrqwW/Uie2LYxXqtcp4k8KXOs+JtL1WCdIzp5yqnvnrQBxtro1zD4/v8Aw8NXvXsbtdxLSfMh/wBk1tfDa8u49Z13RJrmS5t9PlCwySnLEH1rVXwper46bXftEfllNuzHOaf4X8KXOha7qmozzpIdQfcwXtQB1dFFFABXO+KvA2heMYVTWrNZXQfJIPvL9DXRUUAec+DtRuPDPiqbwZqE5ngA3ae7/e2DqDXo1eXeKgF/aA8JFBtLWs27HfrXqNABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBDd2kF9avb3cSyxSDDIwyDXASR6h8N78zW6yXvh+ZvnjHLWx9R/s16LTJYo54XimQPG4wysMgigCKxv7bUrOO6sZlmhkGVZTVivO7yw1H4fag9/oyNdaHK2bi1HLRH1X2rt9J1a01rT47ywlEkbjt1B9DQBdooooAKKKKACiiub8beJh4c0b/R/mv7o+Vap6uelAHAfFm3l8Y6imjeHI919ZL588yfxqP+Wea77wFq1nqfhW2SzAje1UQzRDjy3HUVH4F8MnQdJM95l9SvT51y7dQx7D2rntahk8A+MY9bs0J0nUH8u6iTojn+M0AelUUyGaO4gSaBw8bjcrL0Ip9ABRRRQAUUUUAFYfibxRae
G7IPKDNcyfLDbp96Rqg8U+LoNBRbW3X7TqU3ENunJ+p9qoeGfCU320674lf7TqUvKIeVhHbA9aAIvDvhi91LUR4h8XES3jc29t/BAvbj1rt6KKACiiigAooooAKKKKACs3WPD2l69B5eqWcU+BhWZeV+hrSooA8wvPg8lrK114d1e7t5z0ikkzGPwqk3hD4m2vOn6zpzMevmqTxXrlFUpSWzE4pnkY8KfFG741DWNNVV+75SkVZtvhHdamyzeJ9buXkU/6u2fah+tep0UOUnuxKKRi6F4S0fw6g/s6zjSXGDMR87fU1tUUVJQUUUUAFFFFABXMeNvDDa9p63Fiwi1O0+e3l7gjt+NdPRQBzPgrxOPEGlmK6Uw6ja/u7iBvvAjjP4101cD4w0i70LVl8WaDGWeM/wCmQJ1mX1/Cuv0XWLXXdJh1CxkDxSjt2PcUAX6KKKACkZgiFmOFUZJ9KWuQ+JXiCXQ/Ckq2Pz3t0whijHUhuCRQB5Qtwi/tBv4mMIOkf6oT44LdK+hUYOispyGGRXBN4Btv+FXDRgT5qp9o3/xb/vfzrR+HHiCTXfCkQvfkvbYmKWM9RjgE0AdbRRRQAUUVDeXcNjZy3Ny4SOJSzEn0oAzPFHiK38NaJJe3By/3Yk7s56frWH4G8O3CPL4i14b9Vvjuw3/LFewX0rN0K1m8eeJD4g1NT/ZVqxWxgYcP/tEV6OBgYHSgAooooAKKKKACiiigDJ1vwxpPiCIrqdnHK+MLIV+Zfoa4S6+EEmnu1z4Z1m6imY58ueTKD6CvUaKabWwrXPIz4T+KFr/x4axppB6+apNA8J/E+6/4/wDWNNXHTylIr1yiq55dxcsex5dbfB9791uPEmtXcswPMcEmEP4V3ei+GNI8PxBdLsooXxhpAvzN9TWtRUtt7lWsFFFFIAooooAKKKKACiiigAqG7s7e/tmt72FJ4X4ZHGQamooA85vfDGteDbl9R8HSmezJ3TWEhzx6IK6Xwz4z07xKnlxE216g/eWkvDpXQ1yfifwJZ60323T3On6nGdyTw/LvPo3qKAOsorz/AEvxvfaHfLpHjeAwSZ2pfgfupPb6130UqTRLJEwdGGVYHgigB1FFFABXFfFKWy/4RU286hr6Y7bHH3hL2IrsLu6hsbSW5upBHDEpZ3PQAV554XtZvGniqTxRqUZFlbExWULjhsf8tBQBT+CKiz0e5sdYDprySFrgT/6xh6j2r1SuA8eaTc6VqMPi/Rh/pFrgXaD/AJaRDt9a7HRtWt9c0mC/szmOZA2O6+xoAvUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHF/EHw3c6mtjrOljdqGkuZYV/veopPC3xQ0LxFcx6c832XVSdjWkn3tw64qH4ka7dW8mmeHtOkMN1rMhiSRTyoHWtXw94C0Pw+I54bKOW+UZa7kXMhbuc0AdNRRRQAV518ZvDA1jwkdTtUY6hphE0JTrxyRXotMmiSeB4pQGR1KsD3BoA8stfHc3iX4WWT6dKo1a7Ih8peoxw1ejaFpkOkaNb2kCbFVAWH+0ev615F4A8Btpvxe1eeKfOm2LbrdB0y3Wu5+KKTr4Qe5s7qW2mjkQK0bYzk80AdpRXkOtWF94R1vQ9W07VLmZbkKtxBM+5WyByBXrcT+ZCjnqyg0APooooAKKKKACiiigAooooAKKKKACiiigDy/xZ/yX/wAIf9ek39a9Qry/xZ/yX/wh/wBek39a9QoAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigBHRZEZJFDKwwQRwRXAaroWoeENRfWvDIMlox3XNl1GO5UV6BQQCCCMg9RQBmaDr9l4h05buwfPZ4z96M+hrTrhde8NXmh6i3iDwkNso5uLMfdlXvgetdD4b8S2XiSw861OyZOJoG+9E3oaANmiiigCG8u4bCzlurpxHDCpZ2PYCvPvC1pN4z8Uy+KdSjP2O3
JisYnHysB0cCneLr6fxZ4kg8J6S+IFIkvph93b3T613thYwabYQ2dogSGFQqKOwoAsVT1bS7bWdLnsL1d0M6FW9R9KuUUAefeBtUudD1afwhrTfvIMvZuejRdh9a9BrkPH/huTVNPj1PTAV1PT286Ip1kx/CfatHwh4kj8TaElzkC5j/d3MY/gcdRQBvUUUUAFcr4q8XHS5F0zR4jeavPwkSc+V/tN7VD4n8WzRXY0Tw3H9q1WbjcOVgH95queFfCUWgRtc3T/AGrU5+Z7huTnuB7UAQ+FvCA01zqWsOLvVZvmeVufL9lrqqKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAR0WRGRxlWGCD3FebN5nw38VhsH/hH9RfBI+7bN/9evSqo6zpNtrmkzWF6geKVcYPY9jQBcR1kjV42DKwBUjuKdXAeDdWutB1iTwlr7nzE5sp36TL6D6V39AASACScAda8ztwfGfxYkmk+ax0HiIj7sjH+ddN4+8QHw74TubmLDTyfuo07ktxxSfD/wAP/wDCP+E7eGT5ribMsrnqS3ODQB0+BjGOPSvM5c+C/iyjJxY6/wAyE/diYfyzXplct8QvD/8Ab/hSdIvluIMTRuv3ht5wKAOpBBAI5Borm/AfiD/hIvCdtdSYWZR5cidwV45rpKAAkAEk4ArzjW7qfx94kPh/T3ZNJtHBvpl/jI6AH0rS8b+IroSR+HfD536ne/KzjkRJ3z74rd8MeHLXwzosVlbAs+Myyt952PUk0AaVpaQWNpHbWsaxxRqFVVGABU1FFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAFLVdIsdasmtdSt0njPQMM7T6j3rgnsPEHw7uDLpRk1XQycvbt80sf0PpXpVBAIIIyD1FAGR4f8Tab4lsvP02dWZeJIj96M+hFa9cRr/gJje/2t4TnGnakvO1eIpD/tCsS9+KV3puntpOrWTWmvyfuYCwwkr9mHtQBd8W30/i3xJD4U0l8QIRJfTD7pTun1rvbCxg02whs7RAkMKhUUdhWB4H8MDQNIMt0fM1G8PnXMh5+Y9h7V09ADZYkmiaOVQ6MMMpHBFebaZJJ8PfGbaVcMf7F1N91vI38Ep/hHtXpdYnizw7F4l0KWzcBZwN0EneN+xFAG3RXG/D7xFLf2Mmk6qSmpaefKdX+9Io6NXZUAFFFFABRRRQAUUUUAFFFGR60AFFGRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHl/xD/5K34B/wCvl69Qry/4h/8AJWvAX/Xy9eoUAFFFFABXl3jLXPiPb+L/ALB4b06GXS5QFE7Jkrnqc16jRQBgeD9Ak0DQ0gu5BNduS80v94ntWJ8X5ok8ATxyTeU8ksew++a7qq95YWmoRiO9t450ByFkXIzQBymm+GbnVn0u+16eOdLOJTAkY4PA5NdmAAABwBTURY0VI1CqowAOgFOoAKKKKACiiigAooooAKKKKACiiigAooooA8v8Wf8AJf8Awh/16Tf1r1CvL/Fn/Jf/AAh/16Tf1r1CgAooooAKKKKACiiigApkzMkDtGu5gpIHqafRQBwngPxR4l1vWdVtvEOktZwW8pW3k243Cu7oxRQAUUUUAFFFFABRRRQAUUUUAFcV4k8K3Vrf/wDCQeFSIL9OZoB92de/HrXa0UAYHhfxXbeIrYqVNvexcTW78MpqDxx4m/4R7Rttt81/dnybZR/fPQn2qj4x8NpEH8RaXOLG9tFMjsOFkA5Oa5P4c6mPHviVtc8QOq3dqpjtrRhhWX/noBQB3Pgbwz/YGj+ZefPqN2fNupD/AHj2HtXT0UUAFFFFABXmuswSeAfGMet2aE6TqD+XdRJ0jc/xmvSqpavpltrGlXFjfKDBMhVvb3oAtQzx3ECTQOHjcblYdCK4vxD4qutQvzoPhPEt23yzXA5WEd/xrh
9D8S6nc6jc/D7TrsEwSFRe9li/ug+teseHvDll4csBBZplzzJM33nPqTQBB4X8K2vhy0OCZ7yX5prh+WY9+fSt6iigAooooAKKKKACiiigAooooAKKKKACiiigAoPSiigDgdA8VeJ734iahpOp6Q0OlxDMNxt+9+Nd9RjnNFABRRRQAUUUUAFFFFABRRRQAUUUUAcz418L/wDCQaasto3lajanfbTDqCOcfjSeCfE/9u2D2t6ph1Oz+S4gbqMcZ/GunrzL4owP4Ut5fGmkuIZ4EKToP+W2eAT64oAL8nxl8VobNfnsdE+aZf4XY9M16aAFAAGAOAK4L4Q28MngqHVt4mu9QJkml7k56V3tABSEBlKsMgjBFLRQB5npZPg34q3OnyfLZa2d9sv8KEda6/xb4lh8NaM9ww8y4f5IIl6sx6fhWH8V9NE3hJ9UgdYbzT2Escp6gA5IH1rJ8CRy/EC4g8XatzaxL5dlbnsRwWYUAb/gbw1NYxS6zrR83Vr875GPRB2A9K7Cjp0ooAKKKKACiiigAooooAKKKKACiiigAPArgPCvirxRqnjzVNN1bSTb6ZbsRBcbcb67+jAHQUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBBfXsGnWM15duI4YULux7AV5hpvhmP4lavceItdieO2CmGxC8Hb2cVf8U3k/jLxRD4X0pz9jgIkvpx90gdY69Bs7OGwsorS1QJDCoVFHYUAedpfa/8ADuZYNSSXVdEJws68vCPVjXe6Vq9jrVkt3plwlxC3RlNWpYY54WimRZI3GGVhkEVwWq+Cb/Q75tX8Dzi3l6y2b8xuvoo9aAPQKK5Pwz47tNZm/s/UUOn6qnDW03Bb3FdZQB5/460q40fU4PF2jKRLb4F4i/xxf412ekarb61pUF/aNmOZQ2O49jVqWKOeFopkDo4wykcEV5xpMsnw/wDGL6RdMf7I1J99tI3RJD/DQB6VRRRQAUUVT1XVbTRtOlvtQmWGCJcszGgC1JIkUbSSMFRRksTgCvNvFPxp0bR5Ta6NE+r3H3WMH3Yz71594w8eap45umgs3l0/R42+XacPL759KwrW0gslxbRhCerY5b61zzrKOiPnsdndPDt06S5pfgbeo/EnxzrZdY5beytG6Kq4cfjWHcXXiS9INzrlwpHTy3IqcnPWiud1ZvqfOVM5xs38VvQit77xNZLtttbnZf8Apo+a6DTfip410Z0XUBb39onZF+c/jWJQCR0pqtNdR0s6xlN6yv6nsnhP4vaF4kZbe7zpd4x2rBcHBc+1d+CCAQcg9xXypdWNveENIu2UfdlXhl+hrrvA3xJvvClzHpfiF3u9Mc7Y7knLRH/aPpXRCspaM+lwGc0sS1TqLll+DPfaKitrmG8to7i2kWSKQblZTkEVLW57wUUUUAFFFFABRRRQB5f8Q/8AkrXgL/r5evUK8v8AiH/yVrwF/wBfL16hQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAeX+LP+S/+EP8Ar0m/rXqFeX+LP+S/+EP+vSb+teoUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFcx458Tf8I/o2y1+a/uz5Nso7MehPtQBgeK72fxf4mg8LaQ5FtEfMvp1+7gdUrY13wLa3NjbtoeLC+sh/o8kfGcdm9RVjwP4Z/wCEe0bddfPqF2fNupD3c+ldNQByfhbxc9/O2ka7H9k1eDhkbgTD+8vtXWVz3irwnB4hgWaFvs2owfNBcpwQfQ+orO8NeLLhb06F4nT7PqUXCyHhZx6igDsqKKjuLiK1t3nuJFjijG5mY8AUAOd1jQvIwVVGST0FcBqes6h411B9G8Ns0GnocXd92Yd1U+tR3V7qPxCvmstLZ7XQ42xNcdDN7LXc6VpVpo2nx2dhEI4oxjgcn3PvQBxviXwHBa+HYJvDkRi1DTj50bp9+c
jsx710Hg/xJH4m0JLkEC5j/d3Mf9xx1Fb1ea61DJ4B8Yx63ZoTpOoOI7qJOkbH+M0AelUUyCaO5gSaBw8bjcrDoRT6ACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArzPxMx8YfEWz8PxfvdPsctqMfY56ZrufEesQaFoF1f3L7FjQgH/aPT9a5j4X6PPBosus6ohGpak5eVz3XPy0AZ0iP8NvE6SRgjw9fMFYD7ts3b869JjkWWJZI2DI4DKR3Bqpq+lW2taXNY3sYeKVcYI6Hsa4zwfqlz4c1l/CWuyEkEmxnf/lqvpn2oA9ApGYIpZjgAZJPalrhvGuvXV7eR+FvDrZvrrieYdIE759zQBn6nJL8R/Ecmj27MugWZxdSKf8AXt6A0zwmx8HePbzw3OTHZXeG01OwA613Hh3QbXw5o8VhZrwgyzHqzdzXL/E/SJ30+21/S0J1DTHDKR2TPzUAd5RWdoGrwa7odtqFq++OVBz79/1rRoAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACuW8d+JjoGjiGz+bUb0+TbKOzHufaug1C/g0zT5r27cJDChdyfSuE8H2E/inxBN4t1dD5QzHYxMPl2dmx60Ab/gjwyPDuijz/nv7o+bdSHu5610tFFABRRRQBzvibwZpviSPfIpt7xeUuYvlcH3PpXN2XifWfBl0um+MImuLLIEWpRj5VXsG969GqC8srbULVre9hSaJ+qOMigBbS7t761S4tJVlikGVdTkEVk+LfDkXiXQ5LVsLcL81vL/zzfsa5O78Na14IuH1DwjI93Yk7prCQ7ifZPSqXiz432Gh+FDfWtnI9+fla2brC3+1QBseFvHNrZ6HNbeKrlLK704mOQzHBkUfxUeCfixpHjvxHfaZo6MVtF3eaTw49RXx14t8a6t4x1qXUdTl2vKMFI+Bj0rtv2eNaGk/EqOItgXieVj1oA+yndYo2eRgqqMkntXzv4+8Xz+N9feytnZNHsn2lR/y1cdc+or0P4z+KZNC8LJp9pn7Rqj+QGU8oD3rxyztRZWiQA5YD5m/vH1rnrT5VZHz+d454ekqUH70vyJwAqhVGFHAA7UUUVxHwgUUUUAFFFFABTZI0miaKVQyMMEGnUUBsdZ8LvGs/h7WIvDWqymSyumxaSMeUPpn0r3evlHUrZrm0zCxSeI743HVSOa9/wDhl4p/4SvwVbXbgiaL9zID1yvGa76M+ZWZ+gZNjniqPLP4onX0UUVse2FFFFABRRRQB5f8Q/8AkrXgL/r5evUK8v8AiH/yVrwF/wBfL16hQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAeX+LP+S/+EP8Ar0m/rXqFeX+LP+S/+EP+vSb+teoUAFFFFABRRRQAUUUUAFFFFABRTEmjkZljkVivDBTnFPoAKKKKACiiigAooooAKKKKAIL69g06xmu7pwkMKF3Y9gK4DwlZTeLvE03izVEP2aPMVhGw+Up/fx60nim7m8Z+JovC+lORaQMJL6dfukDqleg2dnBYWUVraoI4Yl2oo7CgCaiiigArE8S+F7TxHZbJv3VzHzFOnDKfr6Vt1U1PVLTR7CS7v5ViiQZJPegDi9L8XT+F5W0rxvKsKxD9zfNwjr2H1qErqHxG1DLCSz8Pwtlexuff6VTvPD0vxcJk16BrXQYz/o8WMSSH+/n0rQ0TWbnwVexeHvEQCWPCWN4B8uOyn3oA7qysrfT7RLaziWKJBgKoxU9IrBlDKQQehFLQAVT1bS7bWdLnsL1N0M6lW9RVyigDz7wNqlzoerz+ENafMkHz2ch6NF2H1r0GuQ8f+G5dUsItT0sFdT09vOhKcGTH8J9q0fB/iSLxLoSXIIFzH+7uI+myQdRQBvUUUUAFFFFABRRRQAUUUUAFFF
FABRRRQAUUUUAFFME0bSGNZFLjqobkfhT6ACiiigAooooAKKKKACiiigAooqnq2oQ6VpVxeXEiokSE5b1xwKAOB8cyv4m8Z6X4Wtz5lmG8y/C/wgcjNejwxJBBHDGMJGoVR7CuB+F2nzXUN74o1JCt9qchBDdkB4xXoNABXOeM/DCeI9J/cnyr63O+3mX7ykc4z710dU9W1S10bTJr6+kEcUS5ye57CgDgbf4lG38NS2d5ET4jh/c/Yh94noG+net/wR4Xk0e0fUNUYTate/PPJ6Z6AV50+havfay3xMitQJoz+6sivzGMdWP4V6/oOt2viDR4dQsnDpIOR/dbuKANGo7mBLq1lgkGUlQo30IxUlFAHm/gGdvDPiTUfCN0dkEbl7Et/GDycV6RXnnxNsZdOudO8XWCF7nTXCFAPvKx5JrudNvotS02C7t3DpKgbI+lAFqiiigAooooAKKKKACiiigAooooAKKKYk8UjsiSIzL1UMCRQA+iiigAooooAKKKKACiiigAooooAKKKKACiiuT8eeJm0XS1s7AGTUr4+TAq9UJ/iPtQBh+IrqXxv4sj8N6c5/s61PmXs6/dYj+CvQ7W1hsrSK2tkCRRKFRR2FYfgzwyvhrRBHKQ97OfMupP77nrXQ0AFFFFABRRRQAUUUUAIzBELMcADJNfGfx28Z22v+MpbTSLcW9vbEpIyrt81+5NfZpAYEEZB6iuV8QfDbwt4liZNQ0uEFurxKFY/jQB8DVv+BNQ/srx3pF7u2iG5Via+hPE37Lum3ILeGr5rVuu2YlhXlHiH4F+MvDU5khtReRxnPmxMOMd+tAHpXxJ1Y+IPiPAqNus4rRZEH+1WSTk5rH0m8lv75ZLlSs0MAicH1FbFcFZ3mz89zuo542S7WQUUUVieMFFFFABRRRQAUUUUAA9PXiuu+B2oSWfizV9FZv3GwSRjtnvXIjrWr8OLj7L8Vo1zjz1C/Wt6DtM97Iajji+Xuj6OoooruPvQooooAKKKKAPL/iH/wAla8Bf9fL16hXl/wAQ/wDkrXgL/r5evUKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDy/xZ/yX/wh/wBek39a9Qry/wAWf8l/8If9ek39a9QoAKKKKACiiigAooooAKZNGZYHjB2llIyO1PooA4fwN4BuvCmsape3eqy3ovZC6I7EiMeldxRRQAUUUUAFFFFABRRRQAVy/jrxKdB0fybT59QvD5Nui9QT/F9K6C/vYdNsJry6cJDChdifQVwXhGxn8WeJJvFuqofITMdhGw42f3setAHQ+CfDQ8O6L/pHz39yfNupD3c10lFFABRRWR4i8SWPhvTzcXr5dvlihX70jdgKAJtc1yy0DTnu7+QKBwq93PoK47TdG1HxrqCat4jDQ6ajbray6Z9C1TaJ4bvfEWpL4g8WA5621kfuxr2yPWu7VQqhVAAHAA7UAJHGkUaxxKERRgKowAKp6xo1lrmnvZ6hCJI3HBxyp9Qexq9RQB55p+q6h4D1BNK14vcaU5xbXvXZ6K1egRSxzxLLC4dGGVYHgioNR0211WxktL6JZYpBggjp7iuDin1L4dXwhu2e70CVsJJ1a3+vtQB6NRUVrdQ3ltHcWsiyxSDcrKeCKloAK811mCTwD4yj1uzQnSdQYR3US9ImPVzXpVU9W0u21rS57C9TdDOu1vUUAWYJ47mBJoHDxyAMrDoRT68+8D6pc6Jq8/hDWnzLB89pIejRdh9a9BoAKKKKACiiigAooooAKKKKACiiigAoPIoooA4TQfh9d6T8QL/xDPq008NyMJbFjhK7uiigAooooAKKKKACiiigAooooAK84+I9zJrmsaX4RtWJjvZN10ydYwDkZr0C9uo7KxmuZmCpEhYk+wrgPhrayazqOpeLr1CJL5zHErD7iqeooA9As7aOzsobaIAJEgQAD0GKmoooAR3WONnchVUZJPYV5vI0vxI8UG
GMkeHtPf527XLj0+lW/GWsXWt6onhPw+5E8p/0u4XpAvofrXXaHo1roOkw2FkgWOMc47nuaALqQRx24gVAIwu0L2x6V5xdJJ8OfFn2yIH+wdSfEwA+W3bt+delVU1TTLbV9Nmsr2MSRSrggjp70AWYpUmiSWJgyOAykdwadXnvhTUrnwtrx8J61IxhYk6fO/8Ay0X0zXoVAFbULKPUdOntJgCk0ZQ57ZGM1wnw0vZNKvdS8IXZOdMk/cu3WRSc8V6JXm/xFgbw5rmm+MbVTtt3ENxGv8YY4yaAPSKKitp1ubWKeMgrIgYEe4qWgAooooAKKKKACiiigAooooACMjFcH4X+H15oHjnU9dn1eW5hvGJS3ZjhK7yigAooooAKKKKACiiigAooooAKKKKACiiigCrqeo2+k6bPfXjhIYELMTXDeC9NuPEevT+L9ZQ/PmOwjYcCLs2PWoNenk8eeME0Cyc/2VYsJLuZejOP4D616NBBFa26QQIEjjXaqqOAKAJKKKKACiiigAooooAKKKKAGuxSNmC7iBnA714p4n/aU0fQdRuNPh0u4nuYGKNzgAivYNU1ex0Wye71K4SCFOrMa+RfjdpP9qa03ijQ9Ikg0mb5WuNuBI396gDQ1v8Aab8U3bldHjhtIz/eQE1f+G1l43+LOuC91rVbqDRoT+9KMVEp/uiuA+Fvw0vfiBr6R7Gj0+I5nmxxj0Ffa2haHY+HdGg03TIVighUKAoxn3NAHzbrOmxaL4/vdOgBEaLlc9SKK6P4x6Y+k/EO11rG2G9iEHsWrnSMEiuCsrTPz7PKbhjG+j1EooorE8UKKKKACiiigAooooAB1rZ+F9sLv4qOxGfs6BvpWN0BPoM12/wG0prm+1XxI3KXH7lPbaa3oK87n0GQU3LFOfRI9soooruPuwooooAKKKKAPL/iH/yVrwF/18vXqFeX/EP/AJK14C/6+Xr1CgAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA8v8AFn/Jf/CH/XpN/WvUK8v8Wf8AJf8Awh/16Tf1r1CgAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKK5Xx54mOhaQLezBk1C9PkwIvVSeN30FAGF4lu5vGviqPwzpjn7BbHzL2demR/BXoNpaQ2NpFa2qCOGJdqKOwrC8FeGR4b0ULOQ9/cnzLqX+89dHQAUUVzfinxbFoaLaWifatSn4hgTk/U0AT+JvFNp4btAzgz3UnENsn3pG9KxvDnhe71DUP+Eh8Wfvbx+YLVuVtx6Y9al8MeEp1uzrfiVxdanJyqnlYR6AetdjQAUUUUAFFFFABUV1awXts9vdRrLE4wysMg1LRQB51JBffDe/M9qJLvw7M2ZUPLWx9fpXe2F/banZR3djMs0Eo3KympZYo54WimRZI3GGVhkEV59e6fqHw/wBQfUdFje60aVs3FoOTD6sPagD0SiqWk6vZ61p6XlhKJI3HbqD6GrtAHIeP/DcuqWEWp6WCuqac3nQlOC+P4T7Vo+D/ABJF4m0JLkEC4j/d3Cf3JB1Fb1ea6zBJ4C8ZR63ZoTpOoMI7qJekTd3NAHpVFMgnjuYEmgcPHIoZWHQin0AFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFMnlWCCSaQ4WNSxPsBQBwHxPv5b37B4WsnIuNUkAYr1VQa7fS7CLTNLt7OBAiRRhcD1xya4DwJC3ibxbqfiy4G+1ZvKst38G04OK9KoAK5jxr4o/sHTlt7NfP1O7/d20C9STxn8K19c1m00HSJtQvpAkca8Z/iPYVyXgvRrvV9Sk8Wa/GVnn/49YH/5Yp2I+tAGv4K8LnQNOaa8bztSuvnuJj1JPOPwrpqKKACiiigDnvGPhmPxJpBRD5d5B89vMv3lYc4z71U8DeJpNXtJNO1QeVqtidk8Z4J9DXWVwvjfQ7qyvYvFOgIfttr/AK+NP+
Wyd8+uBQB3VUNb0uHWdFubG4QOssZCg9mxwfzqPw/rlr4h0aHULJwyuMMO6t3FadAHBfC/VJls7zw5qDl73SZCjs3cE8V3teaeMYz4S8dad4mgBjsZ28q+2/xsThc16TG6yxLIhyrAEH60AOooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK5Dx94kfS9OTTdNBk1K/PlRKvVAf4q6PVtUt9G0qe/u2CxQIWPPX2rivBGl3Gu6tN4v1tSZJsrYoRwkX09aAOj8IeG4/DWhpbnD3Unz3Mv99+5reoooAKKKKACiiigAoorG8Q+KtK8NWol1K4VWbiOMcs59MUAbBYKMsQAO5ride+IsMN4dL8M2z6tqWdrJF0i9yayvK8UfEJz9oD6LojHmM8SSj1BrttB8M6X4dtVh023VWAwZWGXb6mgDlNL8A3urXq6r44vPtlwCDHbRnEaD0I711Ou+GdO17wzcaHcQRraTJs2qoAX6VsUUAYfhPwlpfg3RI9M0eERxoPmbux9TW5RRQBxfxR8JL4r8IyLEhe9s8zWuP74rwbTrlrm12zDFxCfLmU9Qw619W14b8U/A8uh37eJNEhLWsh/wBLhQfd9WrGtDmV1ueJnGAeKo80Pij/AFY46io4J47mFZoGDIwyKkrgPz9pp2YUUUUAFFFFABRRUN1dRWcJklPsqjqTTHGLk7R3Ir4zzy2+m2IJu71wkYH619H+CvDUHhTwta6bAu0qu+T3c9a4P4TeBJY2HiXXocXUvNvE4/1Y7GvW676UORan6JlWB+qUPe+J7/5BRRRWp6wUUUUAFFFFAHl/xD/5K14C/wCvl69Qry/4h/8AJWvAX/Xy9eoUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHl/iz/AJL/AOEP+vSb+teoV5f4s/5L/wCEP+vSb+teoUAFFFFABRRRQAUUUUAFFFMnl8mCSUjOxS2B3xQA+iuN8GfEKHxhqeoWcWnz2psnKFpBw/0rsqACiiigAooooArahfQaZp897dOEhgQuxPoK4PwdYz+KvEM3i/VkJiGY9PjYceX/AHsetR+I7mXxx4sj8Nac5/s60YSXs69Nw/gNeiW1tDZ20dvbRiOKNdqqo4AoAloorj/E/i6aK7GieG4vteqTcEryIB/eNAEvirxd/Zsq6Toyfa9YnGI4l58sf3jS+FfB66U7alqsn2vVZ/mklbkIfRfSp/CvhKHQIWuLl/tWpzndPctySfQegro6ACiiigAooooAKKKKACiiigApHRZEKOoZWGCCMg0tFAHn+q6DqHhHUX1rwuC9ox3XVj1yO5Wus0DxBZeItOW6sXyekkZ+8jdwa1CAQQRkHtXDa94YvNF1FvEHhEbZhzc2Y+7MvfA9aAO5qnq2l22taXPYXyb4Z12sKpeG/E1n4l0/z7Y7JkO2aBuGjbuK2aAPPvA2qXWiatP4Q1t8ywfPaSHgNH2X616DXIeP/Dcuq2EWp6WCuqac3mwFeC+P4T7Vo+D/ABJF4m0KO5UgXEf7u4ToVcdRQBvUUUUAFFFFABRRRQAUUUUAFFFBOATQAUVxWh/EaHW/G154dTTp4nteszD5W+ldrQAUUUUAFFFFABRRRQAVwnxR1aZNJg0LTJCupao4SHHoD836V3LuscbOxwqgkmvNfCqN4u+ImoeIZh5unWZEdgx7MOGoA7rw/pMOiaFa2NvGIxGg3Af3scn860JZEhiaSVgqICzE9gKdXn3izU7nxRrY8J6G7BAQb+4T/lkvpmgCtbrJ8R/FP2qQH+wNPfEQP3bhh/hXpKIqIqIAFUYAHYVU0rTLbR9MhsbOMJFEoGAOp7mrlABRRRQAUUUUAFIyhlKsAQRgg96WigDza+il+HXic6lbq39hX7gXCgcQMemB2r0aGaO4gSaFg8bqGVgeoqHUdPt9U0+azvI1kilUqQwz+NcJ4Vv7jwh4gPhPV5
GNtISdPmf+JfTNAHW+KNFh1/w5dWM6b9yFox/tgfL+tYHww1qa98PtpmpPnUtOYxTqeo54/Su2rzPWgfB3xQtdUj/d6dquVvH7B+goA9MopFYMoZTkEZBpaACiiigAooooAKKKKACig8CuK8PfEeHxB41v/D0enTwvZEgzOPlb6UAdrRRRQAUUUUAFFFFABRRRQAUUVxvj/wARy2FkmkaUDLql+fLRV6xqer/hQBj6vLJ8QfF66NZs39kaa++6lXo0g/g969HhhjghSKFAkaDaqqMACsbwl4ci8M6HHaAh7hhuuJR/y0fua3KACiiigAooooAKjuLmG0gaa5kWONBksxwBXNeJfHum6BJ9khBvtSYfu7SHkt+Pauft/C2veNZ0vfGE72lhndHp0Z2sv+8e9AE2o+OtR8QXT6b4EtmnOdragy/u4j9Kv+Hvh3bWV1/aevzHVNTfl3k5jB9lPSuq0/TLPSrVbewt44I1GMIuM/WrVACABVCqAAOAB2paKKACiiigAooooAKZNDHcQtFOiyRuMMrDIIp9FAHifjf4T3WlzS6r4MjMiMd0tj6n2rzyLUUNybS9ja0vF4eGQYx+NfV9cz4o+H/h7xbB5eq2QDZz5sPyP+YrGdKMtTxsdlFDFvmXuy7/AOZ4F16EH6HNGD6V22p/Ae/t5c+F9cFtEOiTgsSPrWNcfCnxvaKCt1FdEnGFWud0J9D52pw/iov3WmjCwfSg4H3mVfqcVvwfCTxtdKC1/Dak9mXpW7pPwEMxz4r1dr0HqIMpTVCXUdPh/EyfvtJHm63sl3d/YtGtnv7w8COMdPxr1bwJ8JvJmi1jxaBPdjDR25+7H9R3rvfDvg/RfC1mlvpNmibf+WjAFz+NbldEKUYH0mByqhhPeWsu7/QRVCqFUAADAA7UtFFanrBRRRQAUUUUAFFFFAHl/wAQ/wDkrXgL/r5evUK8w8Xr/a/xi8KxWh3nTJGknxztBr0+gAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA8v8Wf8l/8If8AXpN/WvUK8v8AFn/Jf/CH/XpN/WvUKACiiigAooooAKKKKACggEYIyDRRQBDDZ21uzNb28UTN94ogBP1qaiigAooooAK5Tx74mbRNIFrYgyajenyYEXqueN30FdFqWoQaXp097duEihQsxPtXCeDdPn8T+IZvF+roTGcpp8bDgR+uPWgDovBfhpfDeiKkxEl9cfvLqX++9dEeBk0EhQSxAA6k1wWteIL7xRqT6B4TcrEp23d8B8qDuo96AJfEPii91LUDoPhIeZcn5Z7ocrD61t+GPC1r4dtDg+deS/NPcNyXb29qn8P+HbLw5p4t7JMseZJW5Zz6k1rUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHF+JPClxb3/wDb/hY+TqEYzLAvCzj0x61qeF/Flt4htzGw+z30XE1u/BU10Fcl4o8Ivd3C6voEgtNVh5DLwJPYigDra821mCTwD4yj1uzQnSdQYR3US9Im7ua6Lwr4tXWN1hqUf2TVYOJIH43Y/iFberaXba1pc9hfJvhnXawoAsQTx3Nuk8Dh45FDKw6EGpK8+8DapdaJq8/hDW3zLB89pKejR9l+teg0AFFFFABRRRQAUUUUAFFUNR1zTdJtzPf3kUSDrlhn8q5m5+Lvgq0UGfWEAJxwjH+lAHXpZ20U5mjt4klbq6oAT+NTVxFt8YfBN2xEOsqSOTmNh/Sui0rxLpGtw+bpt9FKv+9g/kaANWigEEZByKKACiiigAoopGIVSzHAAyTQBxvxM1ybTPDRs9Ob/iY3zCKBB1bnn9K2fCOhxeH/AAzaWMK7SEDyf75GT+tcbpoPjL4qT30nz6fo2BauPuu5613HiLXbXw7os1/eNhUGFXuzHoBQBj+OPE8mkWkenaWPO1a+OyCJeT7mrXgzwxH4a0cJIfMvJz5lxMfvMx5xn2rI8D6DdXV3L4
o8QJm/u+Yo3/5ZJ2x6ZFdzQAUUUUAFFFFABRRRQAUUUUAFc/4x8MxeJdGaH/V3UJ8yCZeGVh0GfeugooA5LwN4ml1S3l0vVv3erWB2ToeN3oR61c8c6AniLwpc2h++g86MjruXkVj+ONAube7h8UeH126hZ/61F/5ap3z68V0nh3XrXxHosV9aMCHGHU9VbuCKAMn4d6++ueFoReHbfW37qeI9VI4Ga6uvM+fBXxXdjxYa9l3c/djcdBXpgORkdDQAUUUUAFFFFABRR061m6p4h0rRoPO1G9ihT/eyfyoA0qhjs7WKZporeJJW+86oAT+Ncdc/GDwTakedrKDPTEbH+lLbfGDwTdZ8nWUOPVGH9KAO2orN0zxBpesQCbT72KVT/tYP5VpdelABRRRQAUUUUAFFFBOBk0AUdZ1a30TSbi/u2AjhQtjPLH0Fcd4E0m41bUZ/F+tKWnusizVv+WcR7Y9aqag8nxC8ZDTYGb+xdMfdcOvR5R29xXpEcaQxrHEoRFGFVRgAUAOooooAKKRmVFLOwUDqScVxGu/ENEvDpfha1bVtRztZY/uxe5PegDq9V1iw0Wye71K4SCJBkknn8q4KbX/EfjqY23hiJtO0w5DX8q/6xfb0q1pXw+utTvl1bxvem/ugQY4EOI0HoR3rvYYIreJYreNIo16Ki4A/CgDnvDXgfSvDce+KM3N253PcT/M2fYnpXSUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFcz4ws/FN5Hbx+E7+GyYn97JKm7ArpqKAOY8JeDYvDzS3t3MbvVLkf6RcHufb0rp6KKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDy/xZ/wAl/wDCH/XpN/WvUK8v8Wf8l/8ACH/XpN/WvUKACiiigAooooAKKKKACiiigAooooAKKK5Px74lbRtKFlYAy6lenyoUXqueN34UAYXiGeXx14uTw5YOf7MsmEl7MvTeP4K9Djjt9PslRAsMEK4A6BQKwPCuhW/g/wAOFr2VftDDzbu4bjc3XNc/cXepfEK/a000vaaHG2JbjoZ/YUASalrGoeONQfRvDjNDpqHbd33r6qK7HRdEstB02Oy06IJGg5Y/ec+pPepdM0u00ewjs7CFYokGAAOT7n1q3QAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBzPirwhHrWy+0+T7Jqtv8ANDOvGT6N61B4W8XPe3DaPryfZNXgGGVuBKP7wrra57xT4Tt/EVuskbfZr+A7oLlOCp9/UUAUfH3huXVLCLU9LUrqmnN5sBXgvj+E+1aPg/xJF4m0GO5U4uI/3dwnQq461leG/FlxFe/2F4oT7NqMfEcjcLOPUGsjWYZPAXjKPW7NCdI1BhHdRr0hP9+gD0qimQTx3Nuk0Dh45FDKw7g0+gAoorD8VeJ7bwvpLXUw8yZvlhhHWRvSgCxr3iHTvDmntd6nOI1H3V6lj6AV5xc+JvF3jN9miw/2JYdDNMMmVfb0p2maJea3errniyTz5m+aC2/hjXsCK6rgKFUBVHRR0FdMKXWRjKp0Rx9n8NtMS4N3qlzdXl033t0hKH8K24PDOh25JXS7Z8/34wa1KK3UUtjJtszZ/DeiXAAbSrVMf3IgKxdQ+HGiXkgnt3ubS4TlDDJtUH3FdZRQ4p7iTaORt9R8ZeCmDyN/bunDgQoPnjHrmvQfDHi7TfFNn5llJtmUfvYG4ZD6Vmg447HqPWub1nw1J9rGr+HJRY6lF83y8I/sRWE6K3iaxqdz1OiuX8F+L4/Elk0N0n2fUrb5biBuv+99DXUVzG4Vy3xC8QPoPhWd7TDXk/7uGPuxPBxXUngZNeZSZ8Z/FhUPzWOg/OG/hkY9qAOm8F6NB4U8GQpMwUlTcTO/UFhk5Nc7pcMvxD8T/wBr3it/Yli5FpGw4lYdSR
3qTxJez+M/EH/CL6S5WxgIN/Mnp2UGu90+wt9MsIbO0jWOKJQqhRigCcAKoCjAHAApaKKACiiigAooooAKKKKACiiigAooooACAwIIyD1BrzbVYJvh94p/tiyVv7Fv3xdxAcRsehA7V6TVe/sYNSsJrO7jEkUqlWBGetAHL+PtFTxV4MaSykBkhxcwyJ1O3nANX/A/iEeJPC9tdsNk4GyWPupHFc14avJvB+vv4U1dy1jOSbCV+691JqHRyfBfxPudLl/48taJmtvSMjtQB6ZRRRQAVj+I/E+m+GNPN1qUwXskY5Zz2AFVfGHiuDwxpm/Hm3cvywQjqxPeuL0rw9c316Nb8Vyfar1uYoT9yIfSrhByZMpKJDca74z8ZuRp6f2DYdCZRlpR6g0lh8ONJt5jc301zeXDct5sm5SfpXXZ4CjhR0A6CkrqjTjE53NszIPDWiQZ26Xavn+/EDRP4a0S4xu0u1TH9yICtOitLIm5yV98ONJnm+02E1zZ3K/dMcmEH4UtvrvjPwY3/EwX+3rDoPKGGiX1JrrKXPGDyD1B6Gs5U4spTaNbw54p03xPZefp0wLrxJEeGQ+mK2a8q1Xw7cWd7/bfhaT7LfxcvEPuTDuMetdp4P8AFlv4o00uF8m8gOy5t26o1cs4OLOiMlI6GiiioKCuL8f+Ip7aGHQtGzJqeoHYFXrGh4LV0eu6zb6Do89/dkbYlJVc8uewFcp4C0a4v7ubxbrYJu7zJtkbrDEe1AHR+FvDsHhrRIrKLDS43TS95G9TWzRUVxcw2kDTXMqxRqMlmOBQBLWJ4i8XaT4ZthJqVyA7nEcS8szemBXLX/jnUvEd2+m+BbZpQDtfUWX5Ij9O9aXh34e2mnXP9pa3KdT1R+ZJZeUB9lPSgDFFv4p+IUhN4H0TRScGD/lpMvqD2rt9C8NaZ4ds1t9Mt1TAwZGGXb6mtUAKAAMAdAKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA8v8AFn/Jf/CH/XpN/WvUK8v8Wf8AJf8Awh/16Tf1r1CgAooooAKKKKACiiigAoqOSeGHHnSpHn+8wFR/b7P/AJ+4P+/goAsUVX+32f8Az9wf9/BUscscq7opFdfVWBFAFfVNRg0nTJ766YLFChY5PXHauA8MRjVNRufHXiRxFAqkWKycBIuecetVvGOsweJPEA0t7jy9F05t97MD95x/B71csdMu/HdzFPfQtY+HbYj7NaL8vn46MfagBVXUPiRfb38y08OxN8q9GuSO/wBK7+zs7ewtI7a0iWKKMYVVFSQwxW8KxQRrHGgwqqMACn0AFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBi+JPDNn4jsvLnHl3CcxTrwyH61ydtqEoWTwh45Tck6+XBdn7so4wM9jXo1eR/H3xhpmh+EHsmkQ6pPxAB9+P39qAOfv8A4wn4Tzt4a1WD+1JImLRSRvwsR+6DU/wg+Mmo+OPHeoWequkVqybrWL0OelfK1xdT3kxluppJpD1eRix/Wul+GmuN4e+IWlX2/bGs4EnPUUAfetxPHa27zzuEjjG5mJ6CvI9Hkl8beIrnX9TU/Y7WUxWkB6ZH8VbPxW11j4c03TLRyDrjiIFTyAcGrul2CaXpNtZRqF8mMK2O59a3oxu7syqSsrFsnJpKKK6jnCiiigAooooAKOlFFAHJ+K7efQruLxXoynz7Zh9phX/lsua9O0bVINa0i3v7ZgyTIGOOxxyK5uSJJ4JIZFDLIpUg+9YHwsvJNL8Qax4TkYmOzPnRbv8AaPNc1aP2jenLodj4219fDnhW6vesmNkajqSeOK4Sye58KeELTRdN/e+INZYyburIG55/CofiD4igu/G0Fu
/7220b55oBz5rN0474Ndb4G8OTxPL4g1sb9SvPu7ukUf8ACB6cVzmxr+EvDUHhnRUtkG6d/nnlblmY9ea3aKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDB8X+GoPEuivA3yXEX7yCVfvKw6c14d4/8AibZW/hhLS/k2+LNKlCqcYJANe1+PNb1XQPCl1faFp7X92inbGv8AD718KeJNW1HXPEF1qOtAi8mcmQFdvP0oA9U0r9oPxfqHiezgnuY0s5pkjZAvIBOOtfWF3fxWGkPfXLARxReYxJ68Zr879Ol8nVLWX+5MjfkRX15411yXWPB/h/RrSQrcakqSMVPVV6imlfQHoL4eWfxXrdx4o1YF4dxXT426IufSuvJycmobW2isrKG2t0CRxoAFHrjmpa7ox5VY5JO7uFFFFUSFFFFABRRRQAoODkVyHiOKfwxrFv4n0dSBuCXcS9Ch6sa66orq0j1CxnspgDHcIUbPoamUeZWKTs7nWadqFvqmnQXtm4eGdAyMO4qySACScAdSa80+EWpyxx6n4eujtGmzlLYHq0fPNanxA1+4RYPDui5k1LUPlKr1jjPBauFqx17mXdl/iL40FohY6FpUmZSOkko6fUV6UqrGgVAFVRgAcAVzVhHo/wAPvCscV3cJFHCuZZD96RvXHU1zcmu+JfHkht/DcTaXpZP/ACEJB/rV9AO1IDf8S+PdP0OT7HaKb/U2/wBXaw8lvx7VhW/hDXPGU63njS5eCxJ3x6bESpQ+5710vhvwTpXhtDJDH592/MlxL8zFvbPSujoArWGnWmmWq29hbxwRqMYRQM/X1qzRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQB5f4s/5L/4Q/69Jv616hXl/iz/AJL/AOEP+vSb+teoUAFFFFABRRRQAUUUUAeI/FmxXXPizoGj3txcJZSWjuyQyFMsCfSqJ+Fvh3P+t1H/AMCmrX+IX/JdfDn/AF4yfzNbR6mvzviXG4nD41QpVHFcq0T9T3cvo050ryV9Tj1+Fnh0uAZdR/8AApqyvCfi8eC9B8WaVbTT3Vyt55NnC7l3UEYz+tejJ98fWvKvCvw51Xxb4+8Q63outJps1le7Arx7w2R1x+Fb8L4zE4jEVI1puSUer8yMxpU6cE4q2p6H4B8A3t5bR33ifcI2PmeQeDI3UMa9ajjSKNUjUIijAVRgCvMf+EN+J/bx5bAf9ev/ANaj/hDfih/0Plv/AOAv/wBavvTxT1CivL/+EN+KH/Q+W/8A4C//AFqP+EN+KH/Q+W//AIC//WoA9Qory/8A4Q34of8AQ+W//gL/APWo/wCEN+KH/Q+W/wD4C/8A1qAPUKK8v/4Q34of9D5b/wDgL/8AWo/4Q34of9D5b/8AgL/9agD1CivL/wDhDfih/wBD5b/+Av8A9aj/AIQ34of9D5b/APgL/wDWoA9Qory//hDfih/0Plv/AOAv/wBaj/hDfih/0Plv/wCAv/1qAPUKK8v/AOEN+KH/AEPlv/4C/wD1qP8AhDfih/0Plv8A+Av/ANagD1CivL/+EN+KH/Q+W/8A4C//AFqP+EN+KH/Q+W//AIC//WoA9Qory/8A4Q34of8AQ+W//gL/APWo/wCEN+KH/Q+W/wD4C/8A1qAPUKK8v/4Q34of9D5b/wDgL/8AWo/4Q34of9D5b/8AgL/9agD1CivL/wDhDfih/wBD5b/+Av8A9aj/AIQ34of9D5b/APgL/wDWoA9Qory//hDfih/0Plv/AOAv/wBaj/hDfih/0Plv/wCAv/1qAPUKK8v/AOEN+KH/AEPlv/4C/wD1qP8AhDfih/0Plv8A+Av/ANagD1CvlL4t694W1fx1dWGt6dNBeR/J9tL/ACqPpX
rv/CG/FD/ofLf/AMBf/rV8s/Ey11Ky8c3kOtXy312p+eZRgNQAt94LtZWUeGdVi1Q/xAfKVrnLrT7vTLwxXETJJGQTjnH41Fbm5D4tDLuP/PLOf0r0nwl8OfHvia0RLeyaOxlPzzzryB+NAHo3gzxMPH3iHTI3bzI9Jt12j0YDrXr7HLE+teR/DPwYPAfxT1TRZJ/OkFkshb6163XZR+E5qnxBRRRWpmFFFFABRRRQAUUUUAKOCK848aeIIvAfjmx1pztjvVaOUj+LjivRq8u+MXhx/FmveFdGhuFtpLiVgsjDODWdX4GaQ+I0/hP4Su9fv5fFPiINJG0zPaK4xuBPGQete3AADAGAK8qtvAvxKs7WK2tvHNtHDEoRFFr0A/Cpf+EN+KH/AEPlv/4C/wD1q4jpPUKK8v8A+EN+KH/Q+W//AIC//Wo/4Q34of8AQ+W//gL/APWoA9Qory//AIQ34of9D5b/APgL/wDWo/4Q34of9D5b/wDgL/8AWoA9Qory/wD4Q34of9D5b/8AgL/9aj/hDfih/wBD5b/+Av8A9agD1CivL/8AhDfih/0Plv8A+Av/ANaj/hDfih/0Plv/AOAv/wBagD1CivL/APhDfih/0Plv/wCAv/1qP+EN+KH/AEPlv/4C/wD1qAPUKK8v/wCEN+KH/Q+W/wD4C/8A1qP+EN+KH/Q+W/8A4C//AFqAPUKK8v8A+EN+KH/Q+W//AIC//Wo/4Q34of8AQ+W//gL/APWoA9QpkzmOCRwMlVJx+FeZf8Ib8UP+h8t//AX/AOtUc/g74ni3kLePLcgKcj7Njt9KAMP/AIaR0y08S3Wka1prwRwuUM+cg/hVfxH4f+GnxdjaXQ9Tt7PVWHyyDCDPoRXzZ4shuoPFN9Ffzi4uFlIeUDG4+tZ9mLwyj7B5/mE8eTnP6UAdL40+HOu+CNQMd/D5sBP7q4i+ZWHrx0r2L4VX7eIbjQri5bzJNOgaMe1YPgbwb8UNatorafdHpU42tLeDeQp9M12Pwz8NJ4S8Ya3o5k8w27ja3rnrWlNe+iJ/CepHqaSiiu05QooooAKKKKACiiigApQcHIpKKAOC1HWIfB/xgivHYJazWDMyZx5kmOKb4f8AE0smo3WpabZvq+v3zHYmOLRD2zWd8RPCw8WfE7RLOe4Mdt5YMka8Fh9a9s0Lw5pfhyxS10q1SJEGAxGWP1NcVT42dUPhRyukfD+e/v11fxpdnULvgxwg4jjHoR3ru4oo4IxHDGsaL0VBgD8KfRWZYUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHl3iohv2gPCIHVbSbP616jXDePfDt7NqVh4m0OLztS0wFVi/voetWvDXxF0rxBfrpeJYNTC5kgeMgKR15oA6+iiigAooooAKKKKAPD/ixqltofxh8PalqQlW0SzkVpEjLAHJ9KrH4o+Fc/wDH1cf+A7V7lcWVrd4+1W0M+OnmRhsfnUP9i6X/ANA2z/78L/hXhZhkWGzCt7aq2na2lv8AI7KGMqUI8sUjxRPij4UDgm6uP/Adq1/gNcJfXPiu+t0kFvcX4aJpEK7hg8816p/Yul/9A2z/AO/C/wCFWILW3tUK20EcKnqI0Cj9KvLslw+XTlOk221bUVfFzrpKSJaKKK9o5AooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArwXXv2e5vFfju51jUdQENpI2fKAyWFe9UUAcT4W+EnhLwntfT9NR5h1klG7J/Gu0jijiQJEioo6KowKdRQB5DrqHTfjebyYbI7y2WJG/vHHSuvYYYisb4v6ZIbLTNdhU/8SqfzZCvUrxWlZXiajp1veRkFZ4w/HvXXRe
ljnqLW5NRRRWxkFFFFABRRRQAUUUUAA61xmqIdT+MHhuOIbv7PcvIB/DmuzLCNGkYgKiliT7Vzfwytn1jxjrfidl3W02IYCexB5xWVZ2jY1prU9UooorjOgKKKKACiiigAooooAKKKKACiiigAooooAKKKKACo50MltIi9WQgflUlFAHz9H+zb/a3iu61XXdQ/0eWUsIFHJH1r1Xwz8M/C3hOMDStMiDY5eUBj9ea6yigBFVUUKihVHQAYAryK6jOnfGiWOX5DfKXj/wBoCvXq8w+LVk+n3uleKYFJeycQttHQMaqDtJMmSujoD1NJTYpUuLeOaJgySIGBH0p1d5yBRRRQAUUUUAFFFFABRRTZriOzt5LqYgRwqXbPoKAOSj/0/wCPenpGN8MFizO3YMO1ev15h8JrGW8vdY8QXS7lubgi0c/88+elen1wTd5NnXFWVgoooqSgooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACoEsrWOczR20KSn+NYwG/Op6KACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAgvrKHULGW0ukDxSrtZT3rybww0/hjWbvwxq7n/WGW1kboVPRRXsFc34y8JQ+JtNHlnyb+3O+2nHVW9/arhLldyZR5lYqEYODSVzeieI5kuv7F8SxfY9Th+UFvuyj+9n3rpSCPp612ppq6OVprcSiiimIKKKKACilAJ6CsHXfE8emSLY6dH9t1Of5YoE5wfUntSbS1Y0rlPxjfz3fk+G9HYm/vmALL/wAsl75r0Xw7osHh/QrbT7ZQojUb8d2xyfzrD8EeDm0VJNT1dhcaxd8yyH+AdlH0rsK4pz5mdMY8qCiiioLCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACqer6bDq+k3FjcqGSZCvI6HHBq5RQB5F4Tnn0O+n8K6uxWS2Y/ZXf/lqufWut6VL438IjxFZLc2LeRqdr80Eo6nHO38a5nQvExurg6VrkX2HVYflaN+A2O4NddKd1ZnPOFndHQ0UpBHWkrYyCiiigAoopQCelACdelcl4uurnV7238LaKd9zcsDcleixdwTV7W/E32W4XS9FT7Zqs/CKnIj9ya6XwT4QXw9ayXV63n6ndnfPMeSD6D2rGpUsrI1hC+rNzRtKttE0e206yXbDboEUVeoorkOgKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAMLxP4S03xRZ+Xex7Zk5imThlPbmvPp7fxh4IbbdRnXdNHWccNCv07169SMqupVgGB6gjOaqMnHYTSe55VpnxB8P6o/lJNLDMOCssZUD8TXQrd2bqCt7bHPbzRW1q/g3w/rsXl6lpkUi/9M2aI/mhBrnI/gj4AilEkejThwc5/tO7/wDjtbKu+qMnS7Fo3NoPvXtsPrKKw9V8c6Bo4P2q5d26AQrvyfwrUm+CngK4k3zaNOzf9hO6H/tWtrRvAPhnw/8A8gvS0TjH72V5v/Q2NDr9kCpd2cBDf+KfGjCHw/Ztplg3/MQcckfSu68J+B7HwyhnY/atQkH725k5JP
t6V0scaRIEiRUUdAowKdWMpuW5oopbBRRRUlBRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFc74q8G6d4otMTr5N0nMVwnDKf610VFAHkE7+LfBLeVqtudZ05Ot6o5QfSrml+PvD+rHZBcSRyDhllQqAfxr1J0WRSrqGU9QRkGsHWfA/hzX02anpcbj/pk7wn80INbRqyW5m6aZlLd2bgFb23OewlFDXVmoJa9thjn/Wiq0PwT8AwSiSLRp1cd/wC07o/+1aJfgl4Bmk8yTRp2b1/tO6/+O1ft/In2XmZOqePvD+kDFxcSSOThREhbJ/CqcEni7xo4i0q2Ojae3/L43Vx9K9A0fwP4c0IY0zS404x+9d5T/wCPk1vIiooVFCqOgAwKiVWT2KVNI53wr4L07wvbEwr513JzNcPyzH29K6OiisTQKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAP/Z) ###Code from typing import Sequence class Initializer: def init_weights(self, n_in, n_out) -> Sequence[Sequence[Var]]: raise NotImplementedError def init_bias(self, n_out) -> Sequence[Var]: raise NotImplementedError class NormalInitializer(Initializer): def __init__(self, mean=0, std=0.1): self.mean = mean self.std = std def init_weights(self, n_in, n_out): return [[Var(random.gauss(self.mean, self.std)) for _ in range(n_out)] for _ in range(n_in)] def init_bias(self, n_out): return [Var(0.0) for _ in range(n_out)] ###Output _____no_output_____ ###Markdown Exercise e) Dense layerComplete the DenseLayer class below. The dense layer takes an input vector and computes an output vector corresponding to the value of each artificial neuron in the dense layer. 
###Code class DenseLayer: def __init__(self, n_in: int, n_out: int, act_fn, initializer: Initializer = NormalInitializer()): """ n_in: the number of inputs to the layer n_out: the number of output neurons in the layer act_fn: the non-linear activation function for each neuron initializer: The initializer to use to initialize the weights and biases """ self.weights = initializer.init_weights(n_in, n_out) self.bias = initializer.init_bias(n_out) self.act_fn = act_fn def __repr__(self): return 'Weights: ' + repr(self.weights) + ' Biases: ' + repr(self.bias) def parameters(self) -> Sequence[Var]: """Returns all the vars of the layer (weights + biases) as a single flat list""" pass #Insert code def forward(self, inputs: Sequence[Var]) -> Sequence[Var]: """ inputs: A n_in length vector of Var's corresponding to the previous layer outputs or the data if it's the first layer. Computes the forward pass of the dense layer: For each output neuron, j, it computes: act_fn(weights[i][j]*inputs[i] + bias[j]) Returns a vector of Vars that is n_out long. """ assert len(self.weights) == len(inputs), "weights and inputs must match in first dimension" pass #Insert code ###Output _____no_output_____ ###Markdown Verify that your class is correct by running the code below, and verifying that `actual` is the same as `expected`. Here we define a small 3x2 dense layer with some fixed parameters and use numpy to compute the expected values. ###Code import numpy as np np.random.seed(0) w = np.random.randn(3, 2) b = np.random.randn(2) x = np.random.randn(3) expected = np.tanh(x@w+b) class FixedInit(Initializer): """ An initializer used for debugging that will return the w and b variables defined above regardless of the input and output size. 
""" def init_weights(self, n_in, n_out): return [list(map(Var, r.tolist())) for r in w] def init_bias(self, n_out): return list(map(Var, b.tolist())) layer = DenseLayer(3, 2, lambda x: x.tanh(), FixedInit()) var_x = list(map(Var, x.tolist())) actual = layer.forward(var_x) print(actual) print(expected) ###Output _____no_output_____ ###Markdown Exercise f) MLPWe'll now combine multiple DenseLayers into a neural network. We'll define a class to help us with this. We name it Multi-Layer Perceptron (MLP), since in the "old days", a single dense layer neural network was called a perceptron. It takes a list of DenseLayer as input and defines a forward function. The forward function takes a vector of inputs, the data inputs, and return a vector of outputs, the output of the neural network, after being passed through each layer of the network. It also has a parameters function which just returns all the parameters of the layers as a single flat list.Complete the MLP class below. ###Code class MLP: def __init__(self, layers: Sequence[DenseLayer]): self.layers = layers def parameters(self) -> Sequence[Var]: """ Returns all the parameters of the layers as a flat list""" pass #Insert code def forward(self, x: Sequence[Var]) -> Sequence[Var]: """ Computes the forward pass of the MLP: x = layer(x) for each layer in layers """ pass #Insert code ###Output _____no_output_____ ###Markdown Exercise g) SGDNow we need code that will perform the stochastic gradient descent. Complete the class below ###Code class SGD: def __init__(self, parameters: Sequence[Var], learning_rate: float): self.parameters = parameters self.learning_rate = learning_rate def zero_grad(self): """ Set the gradient to zero for all parameters """ pass #Insert code def step(self): """Performs a single step of SGD for each parameter: p = p - learning_rate * grad_p """ pass #Insert code ###Output _____no_output_____ ###Markdown Loss functionsWe are only missing a loss function now. 
We're doing regression so we'll use the L2 loss function $L2(t, y) = (t-y)^2$, where $t$ is the expected output (the target) and $y$ is the output of the neural network. ###Code def squared_loss(t: Var, y: Var) -> Var: return (t-y)**2 ###Output _____no_output_____ ###Markdown Backward passNow the magic happens! We get the calculation of the gradients for free. Let's see how it works. ###Code mlp = MLP([ DenseLayer(1, 5, lambda x: x.tanh()), DenseLayer(5, 1, lambda x: x) ]) x, t = sample_data() x = Var(x) t = Var(t) y = mlp.forward([x]) loss = squared_loss(t, y[0]) loss.backward() ###Output _____no_output_____ ###Markdown and the gradients will be calculated: ###Code for i,layer in enumerate(mlp.layers): print("layer", i, layer) ###Output _____no_output_____ ###Markdown Exercise h) Putting it all togetherWe are ready to train some neural networks!We'll train the neural network for 100 gradient updates. Each gradient will be calculated on the average loss over a minibatch of samples. Read and understand the code below. Answer the inline comment questions. We'll plot the loss for each batch, which should decrease steadily. ###Code mlp = MLP([ DenseLayer(1, 16, lambda x: x.tanh()), DenseLayer(16, 1, lambda x: x) ]) # What does this line do? learning_rate = 0.01 # Try different learning rates optim = SGD(mlp.parameters(), learning_rate) # What does this line do? batch_size = 64 losses = [] for i in tqdm.tqdm(range(100)): loss = Var(0.0) for _ in range(batch_size): # What does this loop do? x, y_target = random.choice(train_data) # What does this line do? x = Var(x) y_target = Var(y_target) y = mlp.forward([x]) loss += squared_loss(y_target, y[0]) loss = loss / Var(batch_size) # What does this line do? losses.append(loss.v) optim.zero_grad() # Why do we need to call zero_grad here? loss.backward() # What does this line do? optim.step()# What does this line do? 
plt.plot(losses, '.') plt.ylabel('L2 loss') plt.xlabel('Batches') plt.show() ###Output _____no_output_____ ###Markdown The plot should look similar to: ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAXgAAAEGCAYAAABvtY4XAAAcW0lEQVR4Ae2de6xlVX3Hv0ihyluRh1XOMGPoNNpYCiZoFWpNRaskptY2bUKtDXb6skGpyQwwMRiTWkraSJ1qmyAKFaUtJZhcxqAVakkp0strALUMLQOSQNXS1hr7hzSn+V3W7/KbfffeZ++zz7n79dnJzV577fX4rc9v7e9Zd+1z1pI4IAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAIEOETj++OOnZ555Jn8woA/QB+gDFfuApG93SMaLTTFx54AABCAAgeoEJK0Wq2qH7iDw1Z1KSghAAAJGAIGnH0AAAhAYKAEEfqCOpVkQgAAEEHj6AAQgAIGBEkDgB+pYmgUBCEAAgacPQAACEBgogcEL/OqBp6d7bt0/tTMHBCAAgTERGLTAm6hv3713unXXytoZkR9T16atEIDAoAXeRu4m7lt2rky37VpZG8njcghAAAJjITBogfcRvIm7jeQZwY+lW9NOCEDACAxa4K2BJurMwdPZIQCBMRIYvMCP0am0GQIQgIARQODpBxCAAAQGSgCBH6hjaRYEIAABBJ4+AAEIQGCgBBD4gTqWZkEAAhBoS+C3S7ov/H1X0vvKlp9nPXg6KwQgAIF6BNoS+Kjlh0p6StKWGJkNI/D1HEtqCEAAAl0Q+HMl/WNW0LPXCDydFQIQgEA9Al0Q+KslvTcr6Ol6RzJwdTKZ1GsZqSEAAQiMnEDbAn+4pO9IOqlA4NejGcGPvKfSfAhAoDaBtgX+7ZK+uK7iJQEEvrZvyQABCIycQNsCf72kXy/R9fVbCPzIeyrNhwAEahNoU+CPlPQfko5dV/GSAAJf27dkgAAERk6gTYEvkfONtxD4kfdUmg8BCNQmgMDXRkYGCEAAAv0ggMD3w09YCQEIQKA2AQS+NjIyQAACEOgHAQS+H37CSghAAAK1CSDwtZGRAQIQgEA/CCDw/fATVkIAAhCoTQCBr42MDBCAAAT6QQCB74efsBICEIBAbQIIfG1kZIAABCDQDwIIfD/8hJUQgAAEahMYlcCvHnh6uufW/VM7c0AAAhAYOoHRCLyJ+vbde6dbd62snRH5oXdt2gcBCIxG4G3kbuK+ZefKdNuulbWRPO6HAAQgMGQCoxF4H8GbuNtInhH8kLs1bYMABIzAaATeGmuizhw8HR8CEBgLgVEJ/FicSjshAAEIGAEEnn4AAQhAYKAEEPiBOpZmQQACEGhT4I+TdIOkb0j6uqTXbtyo77kYtuyjs0IAAhCoR6BNgb9G0nuShB8uyQS/8EDg6zmW1BCAAATaEvhjJT0q6ZBCRc/cQODprBCAAATqEWhL4E+XdJekT0u6V9JVko7MaLpd7kgGrk4mk3otIzUEIACBkRNoS+BfLekZSWclUb9S0odzBH49ihH8yHsqzYcABGoTaEvgT5Z0YF29pbMl3RyuNwQR+Nq+JQMEIDByAm0JvAn47ZK2JyW/TNIVG1Q9RCDwI++pNB8CEKhNoE2Bt3n4VUn7JN0k6YVBzzcEEfjaviUDBCAwcgJtCvwGES+LQOBH3lNpPgQgUJsAAl8bGRkgAAEI9IPAaAWelSX70UGxEgIQmJ/AKAXe14Znd6f5Ow45IQCB7hMYpcCzu1P3OyYWQgACzQmMUuB9BM/uTs07ECVAAALdJTBKgTd3MAff3U6JZRCAwGIIjFbgF4OPUiAAAQh0lwAC313fYBkEIACBRgQQ+Eb4yAwBCECguwQQ+O76BssgAAEINCKAwDfCR2YIQ
AAC3SWAwHfXN1gGAQhAoBEBBL4RPjJDAAIQ6C4BBL67vsEyCEAAAo0IIPCN8JEZAhCAQHcJIPDd9Q2WQQACEGhEAIFvhI/MEIAABLpLAIHvrm+wDAIQgEAjAm0K/AFJD0i6r4oRbNnXyM9khgAERkigiraWbZXa5J4J/IurFoDAj7B30mQIQKARAQS+ET4yQwACEOgugTYF/lFJ90i6W9KOgpG8xa/a32Qy6S5FLIMABCDQQQJtCvxLk6ifKOl+SecUiPxaNFM0Hew9mAQBCHSaQJsCH/X8MkkfiBHZMALf6X6EcRCAQAcJtCXwR0o6Oom4he+Q9JasqMdrBL6DvQeTIACBThNoS+C3pWkZm5p5SNKlUczzwgh8p/sRxkEAAh0k0JbA52l4aRwC38Heg0kQgECnCSDwnXYPxkEAAhCYnwACPz87ckIAAhDoNAEEvtPuwTgIQAAC8xNA4OdnR04IQAACnSaAwE+n09UDT0/33Lp/7dxpb2EcBCAAgRoERi/wJu7bd++dbt21sna2aw4IQAACQyAweoG3kbuJ+5adK9Ntu1bWRvJDcCxtgAAEIDB6gfcRvIm7jeQZwfNQQAACQyEweoE3R5qoMwc/lC5NOyAAASeAwDsJzhCAAAQGRgCBzziU0XwGCJcQgEBvCSDwwXU+H883agIUghCAQG8JIPDBdXyjJsAgCAEI9J5AU4G/UNIxkg6R9Mm0Bd+5pctCznlzM1aT9BE836jpfb+mARCAwHQ6bSrwtp67HW+WdKOkVyaRT9GLO22GwFuPYA6e5wICEBgKgaYCvy9J+JWSfj6F712crD9X0mYJ/FAcSzsgAAEINBX4T0n6oqT9ko5I2/Dd/ZwsLy6EwNNZIQABCNQj0FTgnyfpDEnHJSl/kaRXLU7WnysJga/nWFJDAAIQaCrwr5Nkm2bbcb6kP5G0JV1XOR0qyaZ0VmYlRuDprBCAAATqEWgq8DYHb9+g+Ykk1L8r6SuzxDrcv0jSZxH4ek4jNQQgAIEqBJoK/D1JrD8o6YIU9rig47nBl0n6sqQ3IvBVXEUaCEAAAvUINBV4G61fnF6ynizJ5uQfyJXzjZE3SDpT0hsQ+HpOIzUEIACBKgSaCryJuk2znJ30eyLpXRu1fEPMeZI+nmLLBH5HMnB1MplUaQ9pIAABCEAgEWgq8KbRJ0kywba/EzdIeX7ERyQ9IemApKckfV/SZ/KTPhvLS1b6LAQgAIF6BJoK/C9JekzSNZKulfSopHeWCXXOvbIR/HpyBL6eY0kNAQhAoKnA21IFcdR+giRfvmBdnGcEEHj6IQQgAIElEGgq8NkXqnVess7Q/YNvM4JfgvcpEgIQGDSBpgJ/haRbJL07/X1B0uUHS/NirhD4QfdDGgcBCCyBQFOBN/X+hfQLVvsVqy84thhVD6Ug8EvwPkVCAAKDJrAIgQ8yvLwgAj/ofkjjIACBJRCYV+D/R9J3c/48fuFKj8AvwfsUCQEIDJrAvAK/cAGfVSACP+h+SOMgAIElEEDgS6Cyu1MJHG5BAAKdJ4DAF7jI92fdumtlun333rWt/AqSEg0BCECgkwQQ+AK37Ll1/9TEfcvOlaltwm3XHBCAAAT6RACBL/CWj+BN3BnBF0AiGgIQ6DSBeQX+FEnXS7pd0iWSDgsvSW8K4YUF23jJyhx8p/suxkEAAjMIzCvwX5L0W5JOl/QxSXdIOj6puW3Bt/CjDYGP7BD7SIMwBCDQBwLzCvx9GQW3/VgfkvRySVV3dMoUUX7ZpsD7dA0vXPvQpbERAhBwAvMKvIn58zOS/LOSHpH0ZCZ+IZdtCjwvXL27cIYABPpEYF6Bf7+kn85R7p+UZNM3Cz/aFHgfwfPCtU9dG1shAIF5Bb5MwN9XdnPee20KvHUT5uB5WCAAgb4RWIbAPz6viJfla1vg++ZY7IUABCCwDIH/ZplQz3sPgaezQgACEKhHYBkCzwi+ng9IDQEIQGApBOYVeF8WOLtksMU/M+8ovSwfI
/il+J9CIQCBAROYV+DLtLjKPfuK5V1pg277yuWHZmVC4AfcC2kaBCCwFAJtCfwhko5Kom7LHHxV0mvKRB6BX4r/KRQCEBgwgbYEPmr5EenXr2fFyGwYgR9wL6RpEIDAUgi0KfCHSrIlD74n6fKsoKfrHcnA1clkshQAFAoBCEBgqATaFHjX9OMk3Sbpxz0i78wIfqhdkHZBAALLItAFgTc9/6CkD+QJu8ch8MvqApQLAQgMlUBbAn+CJBu52/GCtK78eek699QlgWfZgqE+DrQLAsMi0JbAv0qSrRu/T9KDaQSfK+we2RWB94XHWDp4WA8CrYHAEAm0JfCu25XPXRF4lg4e4mNAmyAwTAIIfE2/+gjelw6+7s7H1jbktngOCEAAAl0igMDP4Q2fgzdxtw25ma6ZAyJZIACBpRNA4BsgZrqmATyyQgACSyeAwDdAnJ2uYZqmAUyyQgACCyeAwDdE6tM1iHtDkGSHAAQWTgCBXzhSCoQABCDQDQIIfDf8gBUQgAAEFk4AgV84UgqEAAQg0A0CCHw3/IAVEIAABBZOAIFfOFIKhAAEINANAgh8N/yAFRCAAAQWTgCBXzhSCoQABCDQDQII/AL9wHfiFwiToiAAgcYEEPjGCJ8twMSddWkWBJNiIACBhRBA4BeCcbq2oqQtOrZl58rUVpq0dWo4IAABCLRJAIFfEH0fwfsywixdsCCwFAMBCMxNAIGfG93GjNk5+Oz1xhzEQAACEFgeAQR+SWx9RM9a8UsCTLEQgMBMAm0J/CmSbpP0NUkPSbpw1t59XdmybybRlIC14quSIh0EILAsAm0J/EsknZFE/WhJD0t6RZnI903gfQTPnPyyui7lQgACswi0JfBZLf+8pDdlI+N13wTewDMHP6v7cR8CEFgmgS4I/KmSHpd0TBT0FN6RDFydTCbL5EDZEIAABAZHoG2BP0rS3ZLekSPuB0X1cQQ/uN5CgyAAgV4RaFPgD5N0i6SLDlLyggsEvlf9CmMhAIEOEGhL4A+RdK2kjxbo+YZoBL4DvQUTIACBXhFoS+BfL2kqaZ+k+9LfWzeoeohA4HvVrzAWAhDoAIG2BD5Id7UgAt+B3oIJEIBArwgg8L1yF8ZCAAIQqE4Aga/OipQQgAAEekUAge+VuzAWAhCAQHUCCHx1VqSEAAQg0CsCCHyv3IWxEIAABKoTQOCrsyIlBCAAgV4RQOB75S6MhQAEIFCdAAJfnRUpIQABCPSKAALfK3dhLAQgAIHqBBD46qwapYxrw8dwo0LJDAEIQKCEAAJfAmdRt3x3J9uf9bRLbp6eduneKXu1Loou5UAAAkUEEPgiMguMj/uznrpzZWp/W3auTG07P7vHAQEIQGAZBBD4ZVDNlOkjeBN0H8GzV2sGEpcQgMDCCSDwC0eaX2Ccd4/h/NTEQgACEGhOAIFvzrBRCYh9I3xkhgAESggg8CVwln3Lp2544bps0pQPgXESQOBb9Ht8+coL1xYdQdUQGCgBBL5Fx/oInheuLTqBqiEwYAJtCfzVkr4l6cFqG/ZJQ92yjzn4AT9dNA0CLRNoS+DPkXQGAt+y96keAhAYNIG2BN4G7qci8IPuWzQOAhBomUDXBX5HMnB1Mpm0jIrqIQABCPSLQNcFfn2Kfqhz8P3qLlgLAQj0iQAC3ydvYSsEIACBGgQQ+Bqwlp2Ub9QsmzDlQ2BcBNoS+M9JelLSDyQ9IemC9bmYgsDQp2j8O/H8qnVcDyCthcAyCbQl8AUyXhw9dIHnV63L7OaUDYFxEkDgO+J3H8Hn/aqVqZuOOAkzINAzAgh8hxyWJ+Qu/EzddMhRmAKBnhBA4DvuKKZuOu4gzINAhwkg8B11jo/mr7vzsen23XvXtvezs8VzQAACEKhCAIGvQmmT02SnZUzkbSSPuG+yI6gOAj0ngMB30IFMy3TQKZgEgR4SQOA76DQfwed9o6aD5mISBCDQUQIIfEcdYyKfNy1TN76jzcMsCEBgEwgg8JsAeVFV+
MjevzLpc/P+ItbjLR0HBCAAAQS+R30gzs1v3bkyffnFN09N1P28ZefK2rdtLB0HBCAAAQS+R33AR/A2Nx9F3cU+b86+aEon2+yq6bL5uIYABLpLAIHvrm9yLXMh9mkZF3WfrrH7fljYvjvvUzd5aSxtNl0sw8viDAEI9I8AAt8/n61bbEJc9iL2khv3rYm7Td34KN/F3vJ6/pjOPjDiFI+nsXOVo276KmVm02xGHdk6uYZAHwkg8H30WonNJn4+aj/tkpunp1367K9g45SOifjFN+7LTWd5faTv/yX4h4LHF4l9rDv7q9soylXCRU2sWkdR/qL4aFNMUxQf02TDdfMUpS+Kz9Y363pR5cyqJ+9+Ud1F8XlldDmu6+1A4Lvce+awLb6IdSG3OBdrn9LJjtpN8GO67Mvb7H8AUey9k1cp0z90rPyicPbDwTAU1eH/bdh9/2DLy+8ovRw7e9jZZD/IsvGW3g/P63F+nc2Tx8nSzkqfLcfr8fr97OXEMmNaC5dxifm9zOw5ponhbLrsdVHdRfHZ/FWvi2yqEl8lTZEd2XZEX5flsT5reeMR7YjhmGaeMAI/D7UO5/FO50IeO1LsOEXprPOZ0MVpHSsr/gcQxT5PpC19jI95T925MrU/K78obPn9A8fsdFvjh4K3zx+qog8Xz5/98CqyL7Yt2h1tyopvvI55YlmxvhguSh/jY91V2hM/4KI/rRwXlywPy+Msq9aRl97y+pGt231a5ivLa2W4nbPCkX1sQzbe7bKzf+BFP5TlLWpnbF/0tZXl9cX2ZG3ycmN81qZYjnOtc0bg69DqSVrrFP6AlJmcl87irINmBdQ7ocVH8YkiHYUoPsTe+e2+d+AqYR9Rx7JiHW5TVvi9jhhv4SK7o30xTYyPZcY0Zs/5V90580MxcorhWEcsN8bHumM4po9l5jGyOBcxO2d5eH11mWXTex3W/9w/Vne028PZeM/r9nm6bB0xPjKIbYjxkUfsS5FZUd4YH+uNtlr5RfU5gzLeMW+0Kdo9r9Aj8GUKONJ7ecJvKDzeO218QC1snd47op3t2uMtj3/oeDl2zgvHh9A7eSzL8tgRR1CezuJi/vjA+MNqaf1hzdoX2+YPcbbMWI6nifZ5W2NZsb4Y9vxWR0wf4+u2x8s3UYnlOGuLL/sPrQqzIlFyNtm6YxuKfGXx8cMy2lEU9vosb7QpxkceHrb0MVyUN8ZHGyy/+cyZRt95uUWiXsXWaJ+z9H5fR5baFPi3SPoXSY9I2lW8Wd+zd4a+ZV8dp3UhrXdsO8dwtK0oPqbJC1u+KJhFdeSls/JivD9s9sBkxc7S+UMa7ciLj2Vmy/E6Z5UVy43hOnVXaU9WTM0uP4rakSdQZcyK0kdBtPxed7Zeu7YjG19Ubmx3DEdfxLwxPsvDp4qsbvszG4vyxvhsvd6GyNbKivXFD5poUyw3xkebYjmRpddX5dyWwB8q6V8lbZN0uKT7Jb2iTOQR+CruHE4af/CyD1G2hUXpYnwMZ/PXuV5UOXXq9LSx7hj2+/Fs9000XKDtOh5F+WN8DMe8MRzTeDgrXBbvh6eJcXYvGx+vq4S9/LyyPK6Mh+ePdXmc5zfhtvtFabLpY33GxPNn0+XFexqrK5Zj13WPtgT+tZJuCYJ+sST7KzwQ+LquJf2YCVQRomXxabPuojZttk2Lqq9pOW0J/DslXRXU/Fcl7QnXHtyRDFydTCZFviMeAhCAAARyCHRd4F3oxQg+x3tEQQACECgh0JbAM0VT4hRuQQACEFgEgbYE/ock/ZukreEl6yvXh+s5AUbwi3A3ZUAAAmMi0JbAm4S/VdLD6ds0l+Zo+kFRCPyYuiVthQAEFkGgTYE/SMBnXSDwi3A3ZUAAAmMigMCPydu0FQIQGBWB3gi8pG8nY1fnOB+YI8889XQpzxjbbPzH2O4xthlfS1X0xnRz8IeBGNsxxjabj8fY7jG2GV+PTdFK2jvGB2CMbeahL3kIBnhrjH18jG2e2XXHC
GWMbUbgZz4Kg0owxj4+xjbP7LS25MHYjjG22Xw8xnaPsc34emyKRnshAAEIQAACEIAABCAAAQhAAAIQgAAEukig1q5RXWxARZtOkXSbpK9JekjShSnfiyR9SdL+dH5hxfL6lMw2j7lX0koy2tY3+mraKeyv0lpHfWpPFVuPk3SDpG9I+rokW7xv6L5+f+rbD0r6nKTnp7WshubrqyV9S5K1048i3x4i6U9TX98n6QzPMIZz7V2jegzlJcG5R6c1fmyHrD8K2yHatoiX97iNRaZfJOmzQeD/WtIvp8R/Lum3izL2OP4aSe9J9tuOaCb4Q/b1SyU9KukFqc3m43dLGqKvz0nPchT4It/ael5fkGRC/5o0sOlxt65neu0liesV3+nUn5f0prTnrYm/HXa2PXCHdLxM0pclvTEJvHX070iy1UrtyPaBFN3r07FJ7Kyt8TDfDtXXJvDfTP+lmG/tv7U3D9jXp2ZG8EW+/QtJvxI6QUwXoocZrLpr1NBab53jcUnHSPqv0DgThHgdbvU2aNMUZ0p6Q3roX5z+XfUG2dRVHAl5fJ/Pp0u6S9Kn09SU7Yx2ZMa3Q/S1TTt+Ly1Zcp2kIfs6K/DxuY2+tQ+614fObIOdV4frQQfHKPBHSbpb0juSZ2PHsKj/HJDHz5P08dSeMQm8PcDPSDortf1KSR/OCPzQfG3vjm6VdIKkwyTdJOn8AX+Ylwl89O2oBT777/nMjb3TA9PXk3V828jc5qT9iP+yDW2K5iOSnkiLiz0l6fuSbGQ39Cmak1Ob3cdnS7p54NNxvyjpk95gSe+S9IkB+zor8EXP8ainaGrvGhU6UN+C9m/btZI+mjH8isxLVntZM8TDR/DWtr/JvGT9nQE2+HZJ21O7LpNkfh6yr+2/Fft22BHphaK9ZP69Afs6K/BFvn1b5iWrTd2N6qi1a1SPydg83FSSfVXqvvRnbT8+vYS0r0n+XXpJ1eNmFpoeBX5bmqN+JAnADxfm6u8Nm4e3NUnM3zZdYVMYQ/f1h9LXQu2dyl9KMr8O0df2FdAnJf0g/Yd6QYlvbWD3Z2lXvAfGNP/e30cXyyEAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCGwWgf9LXye9X9I9kn5qRsW2oFeV79T/PV9Xm0GS2xCAAASWTMDWNfHDFq76il8UnLM/PilIJgS+iAzxEIAABDaJQBR4+ym8/WDIDlvPxxZtslG9/Xjk7Sn+ekn/m0b99stCO3amNPZfwB+mOBN4W5bZflX4sCRbSsAOW77a8v1z+oHSb6Z4W0LiH1K59iMeT59uc4IABCAAgboEfIrGNs3477QqpZVhS1zYSpx2+IqF9ovB7Aj+5yTdkX4+b2ltUwY7TOD/OIXt18T2C2I7bGPs3Slsv8K0X6La5iS/L+nSFG8fAraePwcEIAABCDQgEEfwtjCdrXViQm6Ltu0Jyz7YqN0W+MoKvIn4b+TUbwL/uhR/Uljx0JY2thG9LyVhG1icK8k2eLBlFWxNGVuCgAMCEIAABBoSiAJvRf27pBPTTkG21Z8JvR0HkrjXEXhfk9v+A7D8dvxt2qQiXR50+pH0YWHibyslckAAAhCAQAMCUeB/LC09a1MktrHEx1K5P5MWcTNxt8W8Hgv12X6/RVM0eQJvUzQ2z+8fHD+aNurYkubnrej35qwIGqokCAEIQAACVQj4HLyNmu0lqS29aoeNuv8pvTz9VNrU2gTeDtv71V6E+ktW2+fWNjq3Mv4gpYnfookj+OelNPbi1sqwTdJt+71fS9e2cbgtA2zz8hwQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAIQgAAEIAABCEAAAhCAAAQgAAEIQAACEIAABCAAAQhAAAKBwP8Dajpbh65w6sgAAAAASUVORK5CYII=) Let's also plot the data and what the Neural Network 
has learned. ###Code for _ in range(100): x, y_target = sample_data() y = mlp.forward([Var(x)]) plt.plot(x, y_target, 'b.') plt.plot(x, y[0].v, 'r.') plt.title('True (blue) and MLP approx (red)') plt.show() ###Output _____no_output_____ ###Markdown The plot should look similar to this: ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAXIAAAEICAYAAABCnX+uAAAgAElEQVR4Ae2dC7RtVV3Gv332OedyCZXikQ88Al0zwQeBoY6L95rFzZARV9MaDXBkI7sebIRjWN14hGXKIbSHJqGnrBGi9lAyQ4ZiGIa5TbsonICQQK9C4aOsTFC4597Z+Paec5+551lr7bX2Wmvv9fjm4LBec835n78577f++7/mWgtQEgEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREIHMBI4BcBeArfbMjwN4ZUwpxwMwAOZjjmfZfS2AH89yQs68+wH8aM4y2n56OFby8ngFgH+0hWyx45B1KIlALQl8C4D7OwTg2972uSW36HcBXOjVMS0hPx3ALV69Za8mCfmf2QvUOYERv2/3U3CYfOGxu4YLcvuO7bf/BPDXAB43PNqMlXCs5G1VyHMvANahJAK1J5AkOEV4wj4gekEUneO8ndMSclb5bwCe5dVd5moSVwr55wHwV4JLZP3vAO6xAs79ofC4vFz63L4HwN8D+As/wwzWixwvUWPFb1IHwJy/I8V6yJPjkOORdSmJQK0J+ILzfAD3A/g1AF8BcE2MmDDcsc22mv8IfgfAlwF8FcA7vLBJCGaHFSp/PwXpcgCfAfBNAB8EQGFiCkMrvq08/psA3j3I2v//cwD0APwPgNsAsD1++mMAv+Hv8Na/G8CHAHwdwH/b9fCC8wYAnwTwfwA+CuBo7/yXA/gSgP8CcAmA0FYvKyjkZEZerJfpbAAftj/9KThMofDY3f2FL+Tc8YsAbvczeOs/B+Bfrd1fAPAq75jr84utqNFu/1cZbWWf/p09/x8APMk7n2OBdfMi+UW7/xdsP38DwN8CeLzd//bg4nUFgI8BoCiHKW6sXGb7gL8iOQZ/wNrGunhx/CmvoKNs/RxXHF/sPxdacdlo9063oaUI1JWALzj8R70OgP/AKNCMY0eJiS/kDAfwHyvF91EArrPCHMWD/+CvDw5QkOiJPg3Ad9l/6E6cswj5E6yInmU9tTPtth8Dfa0NQQQm9Df5j/4nARxu2/E+AH/jZaSd9wL4fsuF279tj59kQxwUH3L7PcsxLkZOcXwjgD8CcL4t468A/MyEQs4LCj1yXnij0osAfJ8VTIrWQwBOtRldn9Nm2s7jDwJ4ij1OW3nhcm17ayCGHAsUefY/x8sL7AWB5bO8twG42ZZFtnfbMfW8iF9nvu1xY4UOw8n2vsljANwHgBcq/hr4QVsm+4OJv1DIleOK44vjLBRyjt0LbH4tRKC2BEIhfwTAYV5rkoScnhT/0VMkXHqu55m5fW5JTzX8+e8LIvPxHyFt6Gb0yPkrIhSyGwD8rKscAD1FCl6adIr1zF1e2vnrbgPAqwF8xG6/LmgXhYNtGCfkZwD4FIAjrXdOIaTQpPXIKcj89UGBeg8A/6LlmbpplReo19i9Tshps0sUv0vtBoXc77MjABwE8ER7nEJO8XbpTwC8yW0AYP4Dti+5+9kA6D3z1wsvXHEpbqz8lnfCTwP4hLfN1VX7q4vjh/XSY3dpJULIyY39pyQCtSYQCjlFwU9JQn6svTlHMXF//2u9U78Mt07xi/LI6X25REGhOHxvRiG/yt78c3ZwyYuMf2
M1ySOnt0gRoMDwpzj/aAcFgYlC7s+u8bkw9PBmm88tHkgh5MzLmDjPpQAyZRFy3x57euSCs3X+yQooufAiwzADE4Wc4SQ/0R6GQZgo5GHbmJ+CzERGT7brXDA85Pcn9zFMt93Ls8+G4hxb79BwNW6s8GLsEm9Wsi1+n/MmPm1/rLXNv0AxpCSP3NHTslEEQiFnjNxPLwPwWW+H+wfC+CRvNtErZFgjTaIHypikn0KP/KkJHvkdAH7CO5kC6sIwFwFgDDwpJcXI6YHSFraPiR45RcrdwEsScsbdfa+VF4U0HjnrYZyfM4dcnLZoIWd4g330UgALg6b1Q0YM7TBFeeR/mdEjd/dLWF7okVNIfY+cIv8v9sLCPotLcWPFv3jRo2dYJypFeeSMr4dCrhh5FD3tqx2BcULOmPDDVtgYcqF4UuDcP17GTPlTnN45E0X9x+x6uFi03p8v/BRIXjwYUqEAMjb9XntiGCPnz2AeoyBx9glnHDgh5099en6sm/+IaStFyr9hyfgspyFGJYYD6E3yPMZ7P5BByBmzpSdI8WEbeSOT9xrGhVZoB+v6Ee+GXyjkvLlKm/w/nhdeWKLaxH28b8FQCC8UDIXRO6ewh0JOm2k7Y9f8JeNCEvTI+evEtY33RGiTS/5Y4D62mR47L4S8iPgxdY4l3kh+pvXiuc58USlurPhCzrbxFxRvNHNM8O+HANAZYOIFiRdYjiuOL44zX8jdfRXaqSQCtSYwTsjZOMYrKZq8sXReIOQUGMYeORuC/+A5OyLp5hF/pjOe7RIFyZ+1wpulbjZIKOQnAvi0FU2GaP7AE3KWx5/7nFXBGCzFhHmWbEX8B+7/snD1uyVnVtAWCjIFnz/D03rkLIOxeN6ISztrxQmpq98tQyGnDeEffyWkFXKWSy+YM2QYguB9BIqbq58XOwqc62O2gcLoEoXczVohG964PMEdDMaC271sbwyzHzgTiBdT2syZI36oizd66Z3HCWnUWPGFnPXxpiz7mf1N9rwH4i4OvGfA+jkuo2at/Kq9Me3s1lIERCAlAf7j8p/sTHla7mycs80ZLUqjBJyQj+7d2KKQO9Hf2DudtTLHCi8eHIful+R0WqRaREAERKAEAlUW8hKaqyJFQAREoHkEJOTN61O1SAREQAREQAREQAREQAREQASKJHDUUUeZ0047TX9ioDGgMaAxkGEM2JlARcrx5GVRxJVEQAREQASyEQDAJ3CrkSTk2TpPuUVABESABCTkGgciIAIiUHMCEvKad6DMFwEREAEJucaACIiACNScgIS85h0o80VABERAQq4xIAIiIAI1JyAhr3kHynwREIH6E+j1jFlZMYbLSZKEfBJqOkcEREAECiJA8d661Zhud7CcRMwl5AV1hooRAREQgUkI0BOniAODJbezJgl5VmLKLwIiIAIFEqiLR85PeX3Ofv0j8RFSPdlZ4OhQUSIgAjMnQJF2sW9/PTQs6ViYN2p7Gh45v4bObzfyM06JSUIe1UXaJwIiUEcCvqe9uGjMli354uBJDMoWcn7/72MAXiAhT+oGHRMBEagjgSRP2o99dzrG8C9PHDyJT9lC/n4Ap9kvpMd55HusEfuWlpaSbNUxERABEagMAd/j5qwTbvvC7h+vs0d+NoCrbCyFn6iKE/JhuEWhlcqMURkiAiIwhoDvcXPWyfLyxjRCCje3V1fTxcjHVDX2cJke+eUA7gewH8BXADwE4N1D1Y5YkZCP7S9lEAERqAgB3+OmR07hdtMIGUJhKMV56mNN9l35sZk3ZyhTyH2plke+mb32iIAI1JyAr79O2F0sfFM83M/Mdrttuu05nwiSkNd8IMl8ERCBihDo9cza7kvNlU99m9nZ/YS5uHOZ2dn5uHnfMcvmvnP3joq1L97z88bMzeW6EzotIfe989h1hVYqMiBlhgg0kIBzgLnMnSjEu3YZs3fvIAjObQbG6YYDZh1zZh2d/vIQYPp/zlVn/IXnujgMRXxhIdfcRAl57h
5VASIgAlUnQPGeKHoRqj+3d+8eCnZfuCnQ9KqdUFsx5zEKuFse7MxtiLXvkdMwbrsnhyaAKSGfAJpOEQERqBeBcIYJt2OTE+8osaXoekI9XKdX7TxsT7yH3jgwCK94Yr222jM37VoxXOZNEvK8BHW+CIhA5QlEeuROsLlk4pJTTxgioSiHsWs/HOKLOT1x51XzfDvv8IHdy+afH/sic+exzzP37F0dYRRpz0iObBsS8my8lFsERKCmBCim927bNRBVetuMS9OTdiLMpR8emZsz691Fs97pmvUtNvzBPN2uWeueYm467jyzdu7lkSGRcUKd6RdCCt4S8hSQlEUERKCGBKimVEyK9o4dIyGRg3PdYfy6L+aht93pmN7iTrNzgbNPVszOxV7fYef/estXm61b1vtOO3Wd1YRpnFCPE/qwvHHbEvJxhHRcBESgXgSokrwhyfCI72HbcMh/4FhzAIPpfoxhH+wuDMTeetv9t1stL5uV5f39Ingai6I4M40TaeZJI9TMw7K4zJsk5HkJ6nwREIFUBIoUrtgKWYk3DXB4M9KLad+CU8yD2NoX84cxb67fbePXgYHcdNrue95x+0ObguLCw4VuS8gLxanCREAEogikFb+oc7kvtSjSxY3wwoeCPjfXj2szVDISMompOK5eRmsYjeGyCklCXoVekA0i0HACacIRcQgopk54z59fNfuXE+IRzOx75IyJ8KEdN5uEx7NcGCKMYhFRnnpE1qntkpBPDbUqEoH2EsgjflfvvnYYCunHtPlgjR/rCLGyskC4wyx5tvNclPLUm3SuhDyJjo6JgAgURoD6mvrmnsu8umr2d08wBzD4OrF7UnLk7mNhFqYrKM9FKV0N2XNJyLMz0xkiIAIJBJwGc5k28enGW05fNnyIph94drELzvXuDN5Zsu4eeXdzv7NUkNaQlPmi2hi1L2VxubNJyHMjVAEiIAKOAMWMGkyt5YORaW4Gru29xnwbi4MXS1GsOceb4RO7PnzCkgW6l1SlEHEnrLQh9S8B15CYpSszrJ7b7tqTFPWJKTb3bgl5boQqQATaSyAUNgomRdzN9qNDHYreCK1ez9w098P9NwW6kw6iYzgt8BF0+7FxCn1WJXbC6mwpwol3ZfL+aSjWs46bS8hHRpU2REAEfAKhUIfHQi+U+ek4OyGngFLkYtPKilnrPH3EI3947jCzp7NqLsSK2T7XSz4/omDawKmBTsSdLf5DPRGnjd2VJNZJIj+24AIySMgLgKgiRKBJBJx4MyQRCrXfzjhh43n+a0xYXj+5goc7Nh6BpJjf0jnVPLDjZf23ASbV69sQrrNonutE3C05tTxtqCcs0227sqM8cuaJap47t+xlmUJ+GIDPALgNwB0AXh/7RQl7QB+WKLu7Vb4IJBPwxcqJMT3aKG/Wz0vx5LZLTtT6r2il4iddFVxmr4CIXa7oxKV/caGI0zNnWN21JbQzsbCIg5PaFVFUobvKFPIOgCOsRi8A+DSA5ySJuYS80L5VYSLQJ5BFfCiEzovlkl5snAfKwiPL5k7O4+b7TrZsGRTglDTuqlBQX7Hq0Jv3xT3qglRQ1TMtpkwh9zX7cACfBfBsf2e4LiGf6VhQ5Q0kECVsSc2k4+xiylyGk0QihdsvkBn8JytdYQlXBVcm66bocjtPcuW5crh04RZeT1hPUgrPT8pblWNlC3kXwK0AvgXgilC47fYea8S+paWlqnCRHSLQCAJZvdHQI+e2S04Qkzz0vhKH7zrhNpU0Qqldmf6vgKTwx6Qiy6r564L1jCufxxPb6IBUaFm2kDvtPhLATQCe5nZELeWRV2hkyJRGEHBCmVaYkvJHXhRCZeW275HTBWaYhfsjkl+mc97jwh9JtkUUPbLLryeufJ6QNt9I4RXYmJaQU7dfB+BXogTc7ZOQV2BEyITGEQi1dlwD4/JvEtLVtc0BaRbOjCnfdeLKTPLInT0skiJMwU8S46j2uXp4njxyp7jplscAoCfOtBXAJw
CcbbcjFxLyqCGofSJQHQIjHwwuyH11Qh0ReelfE1yog46+u3eaJMak5crk0qWofe6Yv0ybzz9n1utleuTPAPA5AGsAbrceeaSAu50S8lkPB9XfdgKJIsaDTlW5pPL62zxecAqvFfTKuS+pqtDMuLzcP66sgptTWnFlCrnT59RLCXlp/ayCRWAsgbECGKqqU0G3HFtD9gxjbYooMsrMMNsk5YZlVGlbQl6l3pAtIlAAgUk9zUgB9Aubkfr5JqTBk8bMyLamKbyieSTkFe0YmSUCkxBII2Jx5Ybn9p/KDEMnWVU1rrKS948zM2wrt+ucJOR17j3ZLgIBgdDTHBdTDgWP4n3Xk8403zz2RGN27Jh8mkhgVxU3w7ZX0ca0NknI05JSPhGoAQHf0+QUbk63i3sIxs9Lx7vvgbv5ff6k7nFz9hK41Eks62RriFxCHhLRtgjUnAAFyZ9zTU2mmNNb91Povd+0a2X0+XyeeNxxE0/tCC8U3K5qqpOtUQwl5FFUtE8EKk6AwkMhjhNHHnMP2VCP6Z2Hebntvk7PpfPI+V1M99d/2cqELMILBbermupkaxRDCXkUFe0TgQoToACH9yBDc10einnse7h7PbO+Zas52On2l1T63uqaOb/zh+YjONOc3337JvEP60nadjbkiMwkFV/osTrZGtVwCXkUFe0TgQoTSOs9UpyYl8vIFFFQxK7IU9PuHGtD2oKmkK9OtoY4JOQhEW2LQMUJUHDGeeSpmhBRUMSuVEUp02wJSMhny1+1i8BEBArzHiMKitg1kY06aXoEJOTTY62aREAERKAUAhLyUrCqUBEol0AmrzlT5nLtVunlEJCQl8NVpYpAaQSoy6li5MzI72YmPRVUmpUqeJoEJOTTpK26RKAAAqlmllDE/S/1xD0VVIA9KmL2BCTks+8DWSACIwTGRUJSeeRU+/DbmZxQzpOVGkdAQt64LlWD6kwglUjHfAFn2G4Wwmf0Kdz0xPnH8Ao/BKHUSAJlCvkT7QeX7wRwB4DXjPvChD4s0cgxpkZlIJAqbJJUnn8l4HfRGCOnqMsTT6JW+2NlCvnjAJxqxftRAO4GcFKSmEvIaz+e1ICcBHwd5g3NzPqb+0qQswE6fSYEyhTyULM/CODMcKe/LSGfyRhQpRUjQPGmHmcWcbYj95WgYjBkTioC0xLy4wF8GcCjfeG263usEfuWlpZSGa1MItAUArlEOw5CKYXGVab9VSAwDSE/AsAtAF4SIeIju+SRV2FIyIZpEZDzPC3Sza+nbCFfAHADgNeOKHbMhoS8+QNOLdwgMC6cnehYcwbKrl2aibKBs9VrZQp5B8C7ALwlRrc37ZaQt3ostq7xSR65O8b3ifOjECMzB7nhphVyOXKwdRjVYMPhgH2bBLWgHWcAYAVrAG61f2cllS0h15hsG4E4r5veuv+Fn5FneeiJ+0LObaVWEyhTyJM0O/KYhLzVY1GN9whQ4OmJO72mqFPc+0keuUdKqyQgIdc4EIGKEqBe0xOniG+aU86DipFXtOemb5aEfPrMVaMIpCYQF3pJXYAytoKAhLwV3axG1paAlLy2XTdNwyXk06StukQgC4HE2EqWgpS36QQk5E3vYbWvngQS73bWs0myujwCEvLy2KpkEZiMAEWcNzL994mPzD+crFid1VwCEvLm9q1aVgMCm0Lg3MEpKm4SOcV80xNBNWiYTJwqAQn5VHGrMhHYIOA0m998GE4v9J/bp5jTM2dGJRFIICAhT4CjQyJQJgFfsynm/Qd+ItW9TCtUdhMISMib0ItqQ2UIUIcpyGmc6FjNzlJIZVouQ2ZJQEI+S/qqu1EEqL/8cD3D2lyOE3Me51fY9CW2Rg2DmTRGQj4T7Kq0iQQoyO7dKFxyOy5RxBkX3975pLl07g1mbe81cVm1XwTGEpCQj0WkDCKQjkAWIWf4ZQ9WzcNYMAcwZx7BvF5Hmw6zckUQkJBHQNEuEZiEAL1sfrieoRUuk0Ira6s98zDmzSHrwv
eXeh3tJNh1jt5+qDEgAsUSoHinutm5smIOodOPxTgx1wciiu2LNpVWtkf+pwC+BuD2yBeQBzv1PvI2Db2Wt9UFyem+c7743r0tB6Lm5yFQtpDvAHCqhDxPF+ncxhJI7b43loAaVhCBsoWcPvfxEvKCekvFiIAIiEAEgSoI+R5rxL6lpaUIE7VLBERABEQgiUAVhHwYKVeMPKmrdEwEREAEoglIyKO5aK8IiIAI1IaAhLw2XSVDRUAERCCaQNlC/ucAHgBwAMD9AH5+GEeJWFFoJbqTtFcEREAEkgiULeQRch2/S0Ke1FU6VisCmlpYq+6qu7ES8rr3oOyvHgH3sM/IFyOqZ6Ysag4BCXlz+lItqQIBfvl+27aN720OvxhRBeNkQ1MJSMib2rNq1/QJUMT999jy8fvhN9ymb45qbA8BCXl7+lotLZsA317oCzk9c4ZZlESgZAIS8pIBq/gWEQg9cm4ricAUCEjIpwBZVbSIAMWbnrlEvEWdPvumSshn3weyQAREQARyEZCQ58Knk0VABERg9gQk5LPvA1nQQAJ6HqiBnVrhJknIK9w5Mq2eBPQ8UD37rc5WS8jr3HuyvZIE+M1OPgfEmYh6HqiSXdQ4oyTkjetSNagIAnlCI/LIi+gBlZGFgIQ8Cy3lbQWBIoQ4z4WgFZDVyEIJSMgLxanCmkBAoZEm9GK72iAhb1d/q7UpCBThkaeoRllEoDACEvLCUKqgJhEYCY2MbDSplWpLUwiULeQvBPB5APcAuDD+kxKDI/qwRFOGVYPaIfe8QZ3Z3KaUKeRdAPcCOBHAIoDbAJyUJOYS8uYOtNq2TAHz2nZdmwwvU8ifC+AGT7gvAsC/2CQhb9PQq0lb5ZHXpKPabWaZQv5SAO/0VPvlAK70tt3qHmvEvqWlpXb3hlpfHQJ+XNxfr46FskQEhgSqIORO0CGPfNgvWpklAXnhs6SvuicgUKaQK7QyQYfolBkToIjzfeJzc3rGfsZdoerTEyhTyOcBfAHACd7NzpOH7nfEijzy9B2nnCUQcJ64E3Eu9c3NEkCryKIJlCnklOqzANxtZ69cEqHdI7sk5EV3r8rLRMCfoUIRp2dOcVcSgYoTKFvIR4R63IaEvOKjpenmOY+cryz0PHHupsZL05s+AOrbPgl5fftOlpdBIFDtGG0vo2aVKQITE5CQT4xOJzaCQCDcYZv8aIveLR7S0XZVCEjIq9ITsmP6BPil+/n5wQwVL5TiGyKP3Keh9aoSkJBXtWdkV7kEqNDuMz78lA9vbtL9jkhjnPaIM7RLBKZLQEI+Xd6qrSoEdu82hyjgwGBJUadiK4lADQlIyGvYaTI5J4Fez6zNPcM8iK3mEXT7y7Udr85ZqE4XgdkRkJDPjr1qnhWBlRWz0rnIbMfN5kKs9JcX7N4/K2tUrwjkJiAhz41QBdSOQK9neos7zVY8aLp4xHRxwCwuDiIriofXrjdlsGGEEPvGPaczteN6IEhjcmoEej1z5elXmyd19jNM3r/vubw8eA4oeB5oaiapIhGYlICEfFJyOq/2BMLZhxRyN5GFy5hJLLVvtxrQPAIS8ub1aStblDUk4kS80zFmYcEYbrMMTieXR97KIVTrRkvIa919Mp4Esgow81O87ezDkSnkWS8I6gERqAIBCXkVekE25CKQ9TF65ndvqqWY8+FOCriSCNSVgIS8rj0nu4cEJvHIGUKhmLuwyrAwrYhADQlIyGvYaTJ5M4GsIZGs+TfXqD0iUB0CEvLq9IUsEQEREIGJCJQl5C8DcAeAQwCelXYiuuaRT9SHOkkERKDlBMoS8qcCeAqAj0vIWz7C1HwREIHSCZQl5M4Jl5CX3oWqQAREoO0EqiDke6wR+5aWltreH2r/pAR093JScjqvAQTyCPmNAG6P+DvHueMKrTRghNShCXwsk/MIOZ8w5ks/dWiGbBSBSQnkEXJPr2NXFVqZtGd0XjoC9MT5RE/UY5rpSlAuEag9AQl57buw5Q0IH9OkZ0
5xVxKBFhEoS8hfDOB+AA8D+CqAG2J9du+Aph+2aOQV1VSKtntMk545wyxKItAyAmUJuSfP6Vcl5C0bfUU1Vzc6iyKpcmpKQEJe046T2SIgAiLgCEjIHQkt60FA3nc9+klWTpWAhHyquFVZLgIuHq4vP+TCqJObR0BC3rw+bWaLeBNz2zZj+Ekf95FNfYutmX2tVmUmICHPjEwnTJ3A6qo5BAz/9ODP1HtAFVacgIS84h0k84z5xum7+iJOT5yC/tBx2zRXXANDBDwCEnIPhlarSeD63aMeObeVREAENghIyDdYaK2iBHiP8/z5VfMR7Oovua0kAiKwQUBCvsFCaxUmoFmHFe4cmTZzAhLymXeBDBABERCBfAQk5Pn46WwREAERmDkBCfnMu0AG5CWgsEtegjq/7gQk5HXvwZbbTxHnyw/1sGfLB0LLmy8hb/kAmHnzc7rTfLiTIq6HPWfekzJghgQk5DOE3/qqC3CnCyii9d0gAPUnUJaQvxnAXQDWAHwAwJFp3kqu95HXf0ClbgEVeNeuwXc2c7rTOZ361CYrowhUlUBZQr4LwLwV7ysA8G9skpBXdZgUbJdzo/mxZIq4PppcMGAV1zYCZQm5L9r87Nt7/B1x6xLylgw/P7BNEadnTnFXEgERmIjANIT8OgDnxYk3gD3WiH1LS0sTNUIn1YSAi4HwlbR2qsn6lq3m6uWedLwmXSgzq0kgj5DfCOD2iL9zPNG+xMbIO96+2FV55NUcJIVY5cIpbp7g6qrZv7xidi72NHWwEMAqpM0E8gh5rCDbA68A8CkAh4/L6I5LyBs8FP1wCsV8ZYX/ZZ466Jx6RWIaPFbUtMwEyhLyFwK4E8AxTqTTLCXkmfuvPieEHnlvEE7J8jBPRBH1ab8sFYESCZQl5PcAuA/ArfbvHRLyEnuxLkVHuNMRuyJbw3wFzVaMLF87RaDOBMoS8jS6vSmPPPI6D6XybHeeuGYrlsdYJdebgIS83v1XLevTutcZrfZj6ZqtmBGesreCgIS8Fd08hUY6t9nNSuF2QanEoguyUMWIwGwJSMhny785tftus52VEjYuj8Oe59zQDm2LQNMISMib1qOzas8Yt3nM4VlZrXpFoBEEJOSN6MaKNCLBbU7hsFekETJDBOpHQEJevz6busUJ+pzaFnnkqVEpowhkJiAhz4ysXSc4AeZskfl5Yz587jWDCd18X0rGVMQFIWOVyi4CrSAgIW9FNyc3MklgGRKhiD8HPXMVXmW+jS3mEF89y78JxDzZEh0VARGYhICEfBJqDTrH97gXFjZrM4Jr+K4AAAkiSURBVI+/ZO5a8yC2mnV0zEEr4n0x56OWSiIgAjMnICGfeRdkMyDJe85W0iC387idk83wCevoJ64sL5svdo43B9A1azi5L+iPoNtfru29xmbUQgREYJYEJOSzpJ+xbupqlpdMpSmeZdITd0LOMArFva/mrKzT6R9cx5x5Iy4y23GzuRArZnvnk4N8aSpRHhEQgVIJSMhLxVts4WVN4WOom5740ztr5tL5FbO22huoOR/scQoPmF73DLN14UDs+8N5UaCNXCqJgAhMj4CEfHqsc9dUhkfujLpn76pZn1swhzpzA7ff+4qP2bKlH2KhQseJdVbb4spx9mgpAiKQnoCEPD2rSuQsRQAp2oypOO/bxVcyVJbl10JW0a8EeBkhAhUmICGvcOeUapoTaYq4HySnmHObxzOkLOKcRfQzmKCsItBaAhLyNnZ9r2f40eODna452J0f3tDse+T0xinu9n5nlpi3uzaMuwZkEf02do/aLAJZCZQl5G8AsGa/DvRRAI/f9BWJiB36sETW7pssPz96zOmEFO4DmDMHuwuD0Io3kbxssU0r+pO1UGeJQLsIlCXkj/Z0+gIA+tRbhcbV1cu9kfng1+9e3TTdROGPCnWYTBGBMQTKEnJPx3ERgLf7O+LW2+SRT+KRZjonITMP7VzsmYs7K/0lt8PEfUXPWQ/r0LYIiEAxBM
oU8svsB5hvB3BMnHgD2GON2Le0tFRMq6ZYSoJexloxiUhmOidF5jR2p8kT20gdEAERmBqBPEJ+IwCKdPh3TiDa9MhfH+yL3KybR55CLyM7MgxbLC9vimxsOi88h9uxaWXFHJobxMD7y8TMsaXogAiIQE0I5BHySDGO2LlkxT7i0Oiuugl5JnH1BoR/AVhcNP3nbThZhE9X2gkjXu7Bqn8OQx7cjkt8MpMvuRq+E4VPaiqJgAg0lkBZQv5kT6J/CcD7ve3Y1boJeRZxDUcQz+WFgN64/yxO0hRud06SiLMelrt9rjd4J8pcr9B3oqS1IWyvtkVABMojUJaQX2u9cE5BvA7AE2LV2ztQNyFnt+QVNp5PT9w9VMl3VFHcI9Pqqvnm8U8zdx293SS9eZBllnGjsqxyI9uqnSIgAqkJlCXknjynX62jkKcmnZCR4RT//VR8tQlFcySde27/gw58Dzj/voP5sWJOz3xTOSOFjm4wb9I5PObs5JLbSiIgArMnICGP6AMnaBTYJGGLOHXiXfTC7Rtj+2I5IpI0xAq4W/IDDzdte+XE9YUnss3jvPg0ecJytS0CIlA+AQl5wNiJlYtbcznu5mJQROpNd8Hgsre6ZnZ2bzYXdy7bPLd7165NH3W4Bc9M9MhTG2EzpvW2fZuz1qH8IiAC5RCQkAdcfUFzcesywggURPdQzpu6e83Bua7hxxsOomPWFw4bjYmsro5+1AE3mwt2fDawPN8m7RnnkeerQWeLgAiURUBCHpB1gla2R3717sF3MPmuExf3Hrnj6cVWaNMp3VvNPB4xwMH+dEXuKzqxTFZbRtlF26ryREAENghIyDdYDNecoJUWI+/1zP7uCcMXV/U/ZOzcfy6Du53+r4TEWS3DFmhFBESgTQQk5LPobSrz3Fw/lLLubmJSoRnD2b17k0vMC4vCHrPoKNUpAvUg0Aghdx70TEICk1TulJnxG04i37t3bExjkmrqMQRlpQiIQF4CtRdyp4l0ZsuaXRILOU/lUuZYrDogAiKQjUDthdyPHxc+u2Sc2JZaebaOVG4REIH2Eqi9kOdximO7nYXyCR2+0SrJ1S+l8mirxl1Tos/SXhEQgTYQqL2Qs5MKFTknzu4xS84iSXL1C608esg5k5KuKdFnaq8IiEAbCDRCyAvtKD9cQhGnoE89+D7aIt+kpGvK6FnaEgERaAuBZgh5kV6x7/5yPjdDLNw3w+SbNONrygwpqGoREIE4AvUX8jJUjmXSDZ6xgPudVkGTfPO0LgIiMEMC9RdyxR1mOHxUtQiIQBUI1F/Iy/DIq9AzskEEREAEUhIoW8h/GYABcHSaz0tM/GEJxR1SdreyiYAINJFAmUL+RAA3APhS6ULexJ5Rm0RABEQgJYEyhZwfXH4mgP0S8pS9oWwiIAIiMAGBsoT8HABvteGUcUK+xxqxb2lpaYIm6BQREAERaDeBPEJ+I4DbI/4o4p8G8JiUQj4Mn08cI293H6r1IiACLSeQR8iHAhysPB3A12xIhd74OoAvA3hskG/TpoS85aNRzRcBEZiIQBlCHgr0uNDKML+EfKI+1EkiIAItJyAhb/kAUPNFQATqT2AaQj70uFOsfN0atK/iS/7KqLqNZdjXxna3sc0cO2p3vf6NUzuVMhLgQG9jamO729hmjm21u43/wlvWZg3y9nS4+ro9fc2WtrW/29XLtrVt7ew2truNbW6zoLW1v1sp5HyIqY2pje1uY5s5ttXuNv4LV5tFQAREQAREQAREQAREQAREQAREQAREQAREIDuBTO9cz158pc54M4C7AKwB+ACAIytlXfHGvBDA5wHcA+DC4ouvZIl8/fRNAO4EcAeA11TSynKM6gL4HIAPlVO8Sq0qgczvXK9qQ1LatQvAvM17BQD+NTXxH/W9AE4EsAjgNgAnNbWxXrseB+BUu/0oAHe3pN1s8msBvFdC7o2Glqxmfud6g7i8GMB7GtSesCnPtR9GcfsvAsC/tqUPAjizBY0+Ds
DHALxAQt6C3vaamOWd695pjVm9DsB5jWnN5oa8FMA7vd0vB3Clt92G1ePtm0sf3YLG0ik7DcDzJeTN6+3C37leA0RJbXbmX2Jj5B23o4HLtgv5EQBuAfCSBvZt2KSzAVxld0rIQzoN3p74nesNYPIKAJ8CcHgD2pLUhDaHVhZsWIkx4zakywHcb18U9hUADwF4dxsarjaOEuCb4o4e3dXILc7i4GyGYxrZutFG8abuFwCc4N3sPHk0SyO3+CvrXQDe0sjWjW+UPPLxjBqboy1Czml49wG41f69o7E9OmjYWXbWBmevMJzUhnQGAGOnmLp+Joe2JAl5W3pa7RQBERABERABERABERABERABERABERABERABERABERABERABERABERABERABERABERABERABERABERABERABERCBBhL4f9w6VFuqcNXUAAAAAElFTkSuQmCC) ###Code ###Output _____no_output_____
FashionTrainingInception.ipynb
###Markdown Setup directorycloning and merging directories together. ###Code !git clone https://github.com/aryapei/In-shop-Clothes-From-Deepfashion.git !rsync -a ./In-shop-Clothes-From-Deepfashion/Img/MEN/ ./In-shop-Clothes-From-Deepfashion/Img/WOMEN/ ###Output Cloning into 'In-shop-Clothes-From-Deepfashion'... remote: Enumerating objects: 4, done. remote: Counting objects: 100% (4/4), done. remote: Compressing objects: 100% (4/4), done. remote: Total 30676 (delta 0), reused 3 (delta 0), pack-reused 30672 Receiving objects: 100% (30676/30676), 397.23 MiB | 36.02 MiB/s, done. Resolving deltas: 100% (16/16), done. Checking out files: 100% (26451/26451), done. ###Markdown Define Neural NetworkCreate a ConvNet instance and remove last layer to implement transfert learning.:warning: do not forget to freeze pretrained model reduce training workload. ###Code #!/usr/bin/env python3 """ Low Cost Transfert Learning on CIBR with Inceptionv3 ConvNet Description: ============ see this script as a disappointment to me. Was hoping to correctly use ~~~InceptionV3~~~ VGG16 model by freezing the layers and fitting data generator to train this ConvNet. The current script collect extracted features from ~~~InceptionV3~~~ VGG16 and names to write Hierarchical Data Format file. 
Required setup: =============== $ git clone https://github.com/aryapei/In-shop-Clothes-From-Deepfashion.git $ rsync -a ./In-shop-Clothes-From-Deepfashion/Img/MEN/ ./In-shop-Clothes-From-Deepfashion/Img/WOMEN/ Thoses commands clone and merge current Fashion dataset hosted at https://github.com/aryapei/In-shop-Clothes-From-Deepfashion in the same folder ./In-shop-Clothes-From-Deepfashion/Img/WOMEN/ """ import numpy as np from numpy import linalg as LA import os import h5py from tensorflow.keras.applications import InceptionResNetV2 from tensorflow.keras.preprocessing import image from tensorflow.keras.applications.inception_resnet_v2 import preprocess_input import matplotlib.pyplot as plt import matplotlib.image as mpimg from glob import glob img_side_size = 299 class ConvNet: def __init__(self): self.model = InceptionResNetV2(input_shape=(img_side_size, img_side_size, 3), weights="imagenet", include_top=False, pooling="max") self.model.predict(np.zeros((1, img_side_size, img_side_size, 3))) ''' Use inceptionv3 model to extract features Output normalized feature vector ''' def extract_feat(self, img_path): img = image.load_img(img_path, target_size=(img_side_size,img_side_size)) img = image.img_to_array(img) img = np.expand_dims(img, axis=0) img = preprocess_input(img) feat = self.model.predict(img) norm_feat = feat[0]/LA.norm(feat[0]) return norm_feat if __name__ == "__main__": img_dir = "/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN" img_pattern = f"{img_dir}/**/**/*.jpg" print(img_pattern) img_list = glob(img_pattern) print(f"{' feature extraction starts ':=^120}") feats = [] names = [] model = ConvNet() img_list_len = len(img_list) for i, img_path in enumerate(img_list): norm_feat = model.extract_feat(img_path) feats.append(norm_feat) img_name = '/'.join(img_path.split('/')[-5:]) names.append(img_name) print(f"({i}/{img_list_len}) feat extraction of {img_name}.") feats = np.array(feats) names = np.string_(names) print(f"{' writing feature extraction results 
':=^120}") h5f = h5py.File("featureCNN.h5", 'w') h5f.create_dataset('dataset_feat', data=feats) h5f.create_dataset('dataset_name', data=names) h5f.close() import numpy as np from numpy import linalg as LA import os import h5py from tensorflow.keras.applications import InceptionResNetV2 from tensorflow.keras.preprocessing import image from tensorflow.keras.applications.inception_resnet_v2 import preprocess_input import matplotlib.pyplot as plt import matplotlib.image as mpimg from glob import glob img_side_size = 299 class ConvNet: def __init__(self): self.model = InceptionResNetV2(input_shape=(img_side_size, img_side_size, 3), weights="imagenet", include_top=False, pooling="max") self.model.predict(np.zeros((1, img_side_size, img_side_size, 3))) ''' Use inceptionv3 model to extract features Output normalized feature vector ''' def extract_feat(self, img_path): img = image.load_img(img_path, target_size=(img_side_size,img_side_size)) img = image.img_to_array(img) img = np.expand_dims(img, axis=0) img = preprocess_input(img) feat = self.model.predict(img) norm_feat = feat[0]/LA.norm(feat[0]) return norm_feat # Read the produced files : h5f = h5py.File('./featureCNN.h5', 'r') feats = h5f['dataset_feat'][:] imgNames = h5f['dataset_name'][:] h5f.close() print(f"{' searching starts ':=^120}") queryDir = '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Sweaters/id_00000062/01_1_front.jpg' queryImg = mpimg.imread(queryDir) plt.figure() plt.subplot(2, 1, 1) plt.imshow(queryImg) plt.title("Query Image") plt.axis('off') model = ConvNet() queryVec = model.extract_feat(queryDir) scores = np.dot(queryVec, feats.T) rank_ID = np.argsort(scores)[::-1] rank_score = scores[rank_ID] # number of top retrieved images to show maxres = 10 local = "/content/In-shop-Clothes-From-Deepfashion/" distant = "https://raw.githubusercontent.com/aryapei/In-shop-Clothes-From-Deepfashion/master/" imlist = [f"{local}{imgNames[index].decode('utf-8')}" for i,index in enumerate(rank_ID[0:maxres])] 
print("top %d images in order are: " % maxres, imlist) plt.imshow(queryImg) plt.title("search input") plt.axis('off') plt.show() for i, im in enumerate(imlist): image = mpimg.imread(im) plt.imshow(image) plt.title("search output %d" % (i + 1)) plt.axis('off') plt.show() ###Output =================================================== searching starts =================================================== top 10 images in order are: ['/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Sweaters/id_00000062/01_1_front.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Jackets_Coats/id_00003844/01_7_additional.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Jackets_Coats/id_00001053/02_2_side.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Dresses/id_00001323/02_7_additional.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Sweaters/id_00000062/01_2_side.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Cardigans/id_00001967/02_7_additional.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Dresses/id_00000825/01_7_additional.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Cardigans/id_00006965/02_2_side.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Sweaters/id_00000062/02_7_additional.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Cardigans/id_00004785/01_7_additional.jpg']
[20190921]matplotlib.ipynb
###Markdown --- Basic Attributesalpha : ํˆฌ๋ช…๋„ kind : ๊ทธ๋ž˜ํ”„ ์ข…๋ฅ˜ 'line', 'bar', 'barh', 'kde' logy : Y์ถ•์— ๋Œ€ํ•ด Log scaling use_index : ๊ฐ์ฒด์˜ ์ƒ‰์ธ์„ ๋ˆˆ๊ธˆ ์ด๋ฆ„์œผ๋กœ ์‚ฌ์šฉํ• ์ง€ ์—ฌ๋ถ€ rot : ๋ˆˆ๊ธˆ ์ด๋ฆ„ ๋Œ๋ฆฌ๊ธฐ (rotating) 0 ~ 360 xticks, yticks : x, y์ถ•์œผ๋กœ ์‚ฌ์šฉํ•  ๊ฐ’ xlim, ylim : X, Y์ถ•์˜ ํ•œ๊ณ„ grid : ์ถ•์˜ ๊ทธ๋ฆฌ๋“œ๋ฅผ ํ‘œํ˜„ํ• ์ง€ ์—ฌ๋ถ€subplots : ๊ฐ column์— ๋…๋ฆฝ๋œ subplot ๊ทธ๋ฆฌ๊ธฐ sharex, sharey : subplots=True ์ด๋ฉด ๊ฐ™์€ X,Y์ถ•์„ ๊ณต์œ ํ•˜๊ณ  ๋ˆˆ๊ธˆ๊ณผ ํ•œ๊ณ„๋ฅผ ์—ฐ๊ฒฐ figsize : ์ƒ์„ฑ๋  ๊ทธ๋ž˜ํ”„์˜ ํฌ๊ธฐ๋ฅผ tuple๋กœ ์ง€์ • title : ๊ทธ๋ž˜ํ”„์˜ ์ œ๋ชฉ ์ง€์ • legend : subplot์˜ ๋ฒ”๋ก€ ์ง€์ • sort_columns : column์„ ์•ŒํŒŒ๋ฒณ ์ˆœ์„œ๋กœ ๊ทธ๋ฆฐ๋‹ค. ์ ์„  ๊ทธ๋ž˜ํ”„ ###Code data = np.random.randn(50).cumsum() data plt.plot(data) plt.show() ###Output _____no_output_____ ###Markdown ์—ฌ๋Ÿฌ๊ทธ๋ž˜ํ”„ ๊ทธ๋ฆฌ๊ธฐ ###Code plt.subplot(1,2,1) plt.subplot(1,2,2) plt.show() ###Output _____no_output_____ ###Markdown Multi Graph ๊ทธ๋ฆฌ๊ธฐ ###Code hist_data = np.random.randn(100) scat_data = np.arange(30) plt.subplot(2,2,1) plt.plot(data) plt.subplot(2,2,2) plt.hist(hist_data,bins=20) plt.subplot(2,1,2) plt.scatter(scat_data, np.arange(30) + 3 * np.random.randn(30)) plt.show() ###Output _____no_output_____ ###Markdown ๊ทธ๋ž˜ํ”„ ์„  ์˜ต์…˜ - ๊ทธ๋ž˜ํ”„๋ฅผ ๊ทธ๋ฆด ๋•Œ ํ‘œ์‹œ ๋˜๋Š” ์ƒ‰์ด๋‚˜ ๋งˆ์ปค ํŒจํ„ด์„ ๋ฐ”๊พธ๋Š” ๊ฒƒ ํ™•์ธ - ์ƒ‰์ƒ: b(ํŒŒ๋ž€์ƒ‰), g(์ดˆ๋ก์ƒ‰), r(๋นจ๊ฐ„์ƒ‰), c(์ฒญ๋ก์ƒ‰), y(๋…ธ๋ž€์ƒ‰), k(๊ฒ€์€์ƒ‰), w(ํฐ์ƒ‰) - ๋งˆ์ปค: o(์›), v(์—ญ์‚ผ๊ฐํ˜•), ^(์‚ผ๊ฐํ˜•), s(๋„ค๋ชจ), +(ํ”Œ๋Ÿฌ์Šค), .(์ ) ###Code plt.plot(data, 'y+') plt.show() plt.plot(data, 'v') ###Output _____no_output_____ ###Markdown ๊ทธ๋ž˜ํ”„ ์‚ฌ์ด์ฆˆ ์กฐ์ ˆ ###Code plt.figure(figsize=(10,10)) plt.plot(data, 'k+') plt.show() ###Output _____no_output_____ ###Markdown ๊ทธ๋ž˜ํ”„ ๊ฒน์น˜๊ธฐ + legend ๋‹ฌ๊ธฐ ###Code data = np.random.randn(30).cumsum() plt.plot(data, 'k--', label='Default') plt.plot(data, 'k-', drawstyle='steps-post', label='steps_post') 
plt.legend() plt.show() ###Output _____no_output_____ ###Markdown ์ด๋ฆ„ ๋‹ฌ๊ธฐ ###Code plt.plot(np.random.randn(1000).cumsum()) plt.title('Random Graph') plt.xlabel('Stages') plt.ylabel('Values') plt.show() ###Output _____no_output_____ ###Markdown ์ €์žฅํ•˜๊ธฐ ###Code plt.savefig('saved_graph.svg') ###Output _____no_output_____
inference_engine/efficientdet_pytorch/lib/tutorial/train_shape.ipynb
###Markdown EfficientDet Training On A Custom Dataset View source on github Run in Google Colab This tutorial will show you how to train a custom dataset. For the sake of simplicity, I generated a dataset of different shapes, like rectangles, triangles, circles. Please enable GPU support to accelerate on notebook setting if you are using colab. 0. Install Requirements ###Code !pip install pycocotools numpy==1.16.0 opencv-python tqdm tensorboard tensorboardX pyyaml webcolors matplotlib !pip install torch==1.4.0 !pip install torchvision==0.5.0 ###Output Requirement already satisfied: pycocotools in /usr/local/lib/python3.6/dist-packages (2.0.0) Collecting numpy==1.16.0 [?25l Downloading https://files.pythonhosted.org/packages/7b/74/54c5f9bb9bd4dae27a61ec1b39076a39d359b3fb7ba15da79ef23858a9d8/numpy-1.16.0-cp36-cp36m-manylinux1_x86_64.whl (17.3MB)  |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 17.3MB 215kB/s [?25hRequirement already satisfied: opencv-python in /usr/local/lib/python3.6/dist-packages (4.1.2.30) Requirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (4.41.1) Requirement already satisfied: tensorboard in /usr/local/lib/python3.6/dist-packages (2.2.2) Collecting tensorboardX [?25l Downloading https://files.pythonhosted.org/packages/35/f1/5843425495765c8c2dd0784a851a93ef204d314fc87bcc2bbb9f662a3ad1/tensorboardX-2.0-py2.py3-none-any.whl (195kB)  |โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆ| 204kB 38.5MB/s [?25hRequirement already satisfied: pyyaml in /usr/local/lib/python3.6/dist-packages (3.13) Collecting webcolors Downloading https://files.pythonhosted.org/packages/12/05/3350559de9714b202e443a9e6312937341bd5f79f4e4f625744295e7dd17/webcolors-1.11.1-py3-none-any.whl Requirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (3.2.1) Requirement already satisfied: wheel>=0.26; python_version >= "3" in 
/usr/local/lib/python3.6/dist-packages (from tensorboard) (0.34.2) Requirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (2.23.0) Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (1.6.0.post3) Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (47.1.1) Requirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (1.12.0) Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (0.4.1) Requirement already satisfied: protobuf>=3.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (3.10.0) Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (1.0.1) Requirement already satisfied: absl-py>=0.4 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (0.9.0) Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (3.2.2) Requirement already satisfied: grpcio>=1.24.3 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (1.29.0) Requirement already satisfied: google-auth<2,>=1.6.3 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (1.17.2) Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib) (0.10.0) Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib) (2.4.7) Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib) (1.2.0) Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib) (2.8.1) Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from 
requests<3,>=2.21.0->tensorboard) (2.9) Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard) (2020.4.5.2) Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard) (1.24.3) Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard) (3.0.4) Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard) (1.3.0) Requirement already satisfied: importlib-metadata; python_version < "3.8" in /usr/local/lib/python3.6/dist-packages (from markdown>=2.6.8->tensorboard) (1.6.1) Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard) (0.2.8) Requirement already satisfied: rsa<5,>=3.1.4; python_version >= "3" in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard) (4.6) Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard) (4.1.0) Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard) (3.1.0) Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata; python_version < "3.8"->markdown>=2.6.8->tensorboard) (3.1.0) Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.6/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2,>=1.6.3->tensorboard) (0.4.8) ERROR: umap-learn 0.4.4 has requirement numpy>=1.17, but you'll have numpy 1.16.0 which is incompatible. ERROR: datascience 0.10.6 has requirement folium==0.2.1, but you'll have folium 0.8.3 which is incompatible. 
ERROR: albumentations 0.1.12 has requirement imgaug<0.2.7,>=0.2.5, but you'll have imgaug 0.2.9 which is incompatible. Installing collected packages: numpy, tensorboardX, webcolors Found existing installation: numpy 1.18.5 Uninstalling numpy-1.18.5: Successfully uninstalled numpy-1.18.5 Successfully installed numpy-1.16.0 tensorboardX-2.0 webcolors-1.11.1 ###Markdown 1. Prepare Custom Dataset/Pretrained Weights (Skip this part if you already have datasets and weights of your own) ###Code import os import sys if "projects" not in os.getcwd(): !git clone --depth 1 https://github.com/zylo117/Yet-Another-EfficientDet-Pytorch os.chdir('Yet-Another-EfficientDet-Pytorch') sys.path.append('.') else: !git pull # download and unzip dataset ! mkdir datasets ! wget https://github.com/zylo117/Yet-Another-EfficientDet-Pytorch/releases/download/1.1/dataset_shape.tar.gz ! tar xzf dataset_shape.tar.gz # download pretrained weights ! mkdir weights ! wget https://github.com/zylo117/Yet-Another-EfficientDet-Pytorch/releases/download/1.0/efficientdet-d0.pth -O weights/efficientdet-d0.pth # prepare project file projects/shape.yml # showing its contents here ! cat projects/shape.yml ###Output Cloning into 'Yet-Another-EfficientDet-Pytorch'... remote: Enumerating objects: 43, done. remote: Counting objects: 100% (43/43), done. remote: Compressing objects: 100% (39/39), done. remote: Total 43 (delta 3), reused 22 (delta 1), pack-reused 0 Unpacking objects: 100% (43/43), done. --2020-06-18 02:41:28-- https://github.com/zylo117/Yet-Another-EfficientDet-Pytorch/releases/download/1.1/dataset_shape.tar.gz Resolving github.com (github.com)... 140.82.118.4 Connecting to github.com (github.com)|140.82.118.4|:443... connected. HTTP request sent, awaiting response... 
302 Found Location: https://github-production-release-asset-2e65be.s3.amazonaws.com/253385242/b4de2a00-7e55-11ea-89ac-50cd8071e6ce?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20200618%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20200618T024128Z&X-Amz-Expires=300&X-Amz-Signature=2584a0dac8cf892da56cdf5d4845131e4346c765c3b6afae35879931b65f4e4e&X-Amz-SignedHeaders=host&actor_id=0&repo_id=253385242&response-content-disposition=attachment%3B%20filename%3Ddataset_shape.tar.gz&response-content-type=application%2Foctet-stream [following] --2020-06-18 02:41:28-- https://github-production-release-asset-2e65be.s3.amazonaws.com/253385242/b4de2a00-7e55-11ea-89ac-50cd8071e6ce?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20200618%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20200618T024128Z&X-Amz-Expires=300&X-Amz-Signature=2584a0dac8cf892da56cdf5d4845131e4346c765c3b6afae35879931b65f4e4e&X-Amz-SignedHeaders=host&actor_id=0&repo_id=253385242&response-content-disposition=attachment%3B%20filename%3Ddataset_shape.tar.gz&response-content-type=application%2Foctet-stream Resolving github-production-release-asset-2e65be.s3.amazonaws.com (github-production-release-asset-2e65be.s3.amazonaws.com)... 52.217.37.92 Connecting to github-production-release-asset-2e65be.s3.amazonaws.com (github-production-release-asset-2e65be.s3.amazonaws.com)|52.217.37.92|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 5770263 (5.5M) [application/octet-stream] Saving to: โ€˜dataset_shape.tar.gzโ€™ dataset_shape.tar.g 100%[===================>] 5.50M 7.61MB/s in 0.7s 2020-06-18 02:41:29 (7.61 MB/s) - โ€˜dataset_shape.tar.gzโ€™ saved [5770263/5770263] --2020-06-18 02:41:34-- https://github.com/zylo117/Yet-Another-EfficientDet-Pytorch/releases/download/1.0/efficientdet-d0.pth Resolving github.com (github.com)... 140.82.118.4 Connecting to github.com (github.com)|140.82.118.4|:443... connected. HTTP request sent, awaiting response... 
302 Found Location: https://github-production-release-asset-2e65be.s3.amazonaws.com/253385242/9b9d2100-791d-11ea-80b2-d35899cf95fe?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20200618%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20200618T024135Z&X-Amz-Expires=300&X-Amz-Signature=c4d613ce694cbb959c9b5bec39f9e7ae9e57e90262ffee0f8d7c8c847fa1f4e5&X-Amz-SignedHeaders=host&actor_id=0&repo_id=253385242&response-content-disposition=attachment%3B%20filename%3Defficientdet-d0.pth&response-content-type=application%2Foctet-stream [following] --2020-06-18 02:41:35-- https://github-production-release-asset-2e65be.s3.amazonaws.com/253385242/9b9d2100-791d-11ea-80b2-d35899cf95fe?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20200618%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20200618T024135Z&X-Amz-Expires=300&X-Amz-Signature=c4d613ce694cbb959c9b5bec39f9e7ae9e57e90262ffee0f8d7c8c847fa1f4e5&X-Amz-SignedHeaders=host&actor_id=0&repo_id=253385242&response-content-disposition=attachment%3B%20filename%3Defficientdet-d0.pth&response-content-type=application%2Foctet-stream Resolving github-production-release-asset-2e65be.s3.amazonaws.com (github-production-release-asset-2e65be.s3.amazonaws.com)... 52.216.82.216 Connecting to github-production-release-asset-2e65be.s3.amazonaws.com (github-production-release-asset-2e65be.s3.amazonaws.com)|52.216.82.216|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 15862583 (15M) [application/octet-stream] Saving to: โ€˜weights/efficientdet-d0.pthโ€™ weights/efficientde 100%[===================>] 15.13M 15.1MB/s in 1.0s 2020-06-18 02:41:36 (15.1 MB/s) - โ€˜weights/efficientdet-d0.pthโ€™ saved [15862583/15862583] project_name: shape # also the folder name of the dataset that under data_path folder train_set: train val_set: val num_gpus: 1 # mean and std in RGB order, actually this part should remain unchanged as long as your dataset is similar to coco. 
mean: [0.485, 0.456, 0.406] std: [0.229, 0.224, 0.225] # this anchor is adapted to the dataset anchors_scales: '[2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]' anchors_ratios: '[(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)]' obj_list: ['rectangle', 'circle'] ###Markdown 2. Training ###Code # consider this is a simple dataset, train head will be enough. ! python train.py -c 0 -p shape --head_only True --lr 1e-3 --batch_size 32 --load_weights weights/efficientdet-d0.pth --num_epochs 50 # the loss will be high at first # don't panic, be patient, # just wait for a little bit longer ###Output loading annotations into memory... Done (t=0.02s) creating index... index created! loading annotations into memory... Done (t=0.00s) creating index... index created! [Warning] Ignoring Error(s) in loading state_dict for EfficientDetBackbone: size mismatch for classifier.header.pointwise_conv.conv.weight: copying a param with shape torch.Size([810, 64, 1, 1]) from checkpoint, the shape in current model is torch.Size([18, 64, 1, 1]). size mismatch for classifier.header.pointwise_conv.conv.bias: copying a param with shape torch.Size([810]) from checkpoint, the shape in current model is torch.Size([18]). [Warning] Don't panic if you see this, this might be because you load a pretrained weights with different number of classes. The rest of the weights should be loaded already. [Info] loaded weights: efficientdet-d0.pth, resuming checkpoint from step: 0 [Info] freezed backbone Step: 27. Epoch: 0/50. Iteration: 28/28. Cls loss: 26.29772. Reg loss: 0.01289. Total loss: 26.31061: 100% 28/28 [00:46<00:00, 1.66s/it] Val. Epoch: 0/50. Classification loss: 12.20426. Regression loss: 0.01610. Total loss: 12.22037 Step: 55. Epoch: 1/50. Iteration: 28/28. Cls loss: 3.66639. Reg loss: 0.01443. Total loss: 3.68082: 100% 28/28 [00:46<00:00, 1.65s/it] Val. Epoch: 1/50. Classification loss: 3.10739. Regression loss: 0.01396. Total loss: 3.12135 Step: 83. Epoch: 2/50. Iteration: 28/28. Cls loss: 2.61804. 
Reg loss: 0.01078. Total loss: 2.62881: 100% 28/28 [00:46<00:00, 1.66s/it] Val. Epoch: 2/50. Classification loss: 1.99466. Regression loss: 0.01278. Total loss: 2.00744 Step: 111. Epoch: 3/50. Iteration: 28/28. Cls loss: 1.44927. Reg loss: 0.01206. Total loss: 1.46133: 100% 28/28 [00:46<00:00, 1.66s/it] Val. Epoch: 3/50. Classification loss: 1.42343. Regression loss: 0.01165. Total loss: 1.43508 Step: 139. Epoch: 4/50. Iteration: 28/28. Cls loss: 1.44247. Reg loss: 0.01195. Total loss: 1.45442: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 4/50. Classification loss: 1.15894. Regression loss: 0.01040. Total loss: 1.16934 Step: 167. Epoch: 5/50. Iteration: 28/28. Cls loss: 0.96989. Reg loss: 0.01074. Total loss: 0.98064: 100% 28/28 [00:46<00:00, 1.66s/it] Val. Epoch: 5/50. Classification loss: 0.94637. Regression loss: 0.00966. Total loss: 0.95603 Step: 195. Epoch: 6/50. Iteration: 28/28. Cls loss: 0.90316. Reg loss: 0.00981. Total loss: 0.91297: 100% 28/28 [00:46<00:00, 1.66s/it] Val. Epoch: 6/50. Classification loss: 0.80626. Regression loss: 0.00944. Total loss: 0.81570 Step: 223. Epoch: 7/50. Iteration: 28/28. Cls loss: 0.83105. Reg loss: 0.01052. Total loss: 0.84157: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 7/50. Classification loss: 0.69999. Regression loss: 0.00907. Total loss: 0.70907 Step: 251. Epoch: 8/50. Iteration: 28/28. Cls loss: 0.68107. Reg loss: 0.01090. Total loss: 0.69197: 100% 28/28 [00:46<00:00, 1.68s/it] Val. Epoch: 8/50. Classification loss: 0.62273. Regression loss: 0.00883. Total loss: 0.63156 Step: 279. Epoch: 9/50. Iteration: 28/28. Cls loss: 0.63515. Reg loss: 0.01228. Total loss: 0.64743: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 9/50. Classification loss: 0.55948. Regression loss: 0.00851. Total loss: 0.56798 Step: 307. Epoch: 10/50. Iteration: 28/28. Cls loss: 0.50954. Reg loss: 0.01053. Total loss: 0.52007: 100% 28/28 [00:47<00:00, 1.68s/it] Val. Epoch: 10/50. Classification loss: 0.50945. Regression loss: 0.00836. 
Total loss: 0.51781 Step: 335. Epoch: 11/50. Iteration: 28/28. Cls loss: 0.52033. Reg loss: 0.00733. Total loss: 0.52766: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 11/50. Classification loss: 0.46788. Regression loss: 0.00800. Total loss: 0.47587 Step: 363. Epoch: 12/50. Iteration: 28/28. Cls loss: 0.49584. Reg loss: 0.00927. Total loss: 0.50511: 100% 28/28 [00:47<00:00, 1.68s/it] Val. Epoch: 12/50. Classification loss: 0.43143. Regression loss: 0.00792. Total loss: 0.43935 Step: 391. Epoch: 13/50. Iteration: 28/28. Cls loss: 0.45326. Reg loss: 0.00893. Total loss: 0.46219: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 13/50. Classification loss: 0.40211. Regression loss: 0.00764. Total loss: 0.40974 Step: 419. Epoch: 14/50. Iteration: 28/28. Cls loss: 0.40421. Reg loss: 0.00882. Total loss: 0.41303: 100% 28/28 [00:47<00:00, 1.68s/it] Val. Epoch: 14/50. Classification loss: 0.37800. Regression loss: 0.00736. Total loss: 0.38537 Step: 447. Epoch: 15/50. Iteration: 28/28. Cls loss: 0.38576. Reg loss: 0.00615. Total loss: 0.39191: 100% 28/28 [00:47<00:00, 1.68s/it] Val. Epoch: 15/50. Classification loss: 0.35435. Regression loss: 0.00746. Total loss: 0.36181 Step: 475. Epoch: 16/50. Iteration: 28/28. Cls loss: 0.38551. Reg loss: 0.01182. Total loss: 0.39733: 100% 28/28 [00:46<00:00, 1.68s/it] Val. Epoch: 16/50. Classification loss: 0.33601. Regression loss: 0.00737. Total loss: 0.34338 Step: 499. Epoch: 17/50. Iteration: 24/28. Cls loss: 0.35644. Reg loss: 0.00668. Total loss: 0.36312: 82% 23/28 [00:41<00:05, 1.15s/it]checkpoint... Step: 503. Epoch: 17/50. Iteration: 28/28. Cls loss: 0.35166. Reg loss: 0.00812. Total loss: 0.35978: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 17/50. Classification loss: 0.31798. Regression loss: 0.00725. Total loss: 0.32523 Step: 531. Epoch: 18/50. Iteration: 28/28. Cls loss: 0.35137. Reg loss: 0.01101. Total loss: 0.36238: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 18/50. Classification loss: 0.30364. 
Regression loss: 0.00718. Total loss: 0.31082 Step: 559. Epoch: 19/50. Iteration: 28/28. Cls loss: 0.29872. Reg loss: 0.00653. Total loss: 0.30525: 100% 28/28 [00:46<00:00, 1.68s/it] Val. Epoch: 19/50. Classification loss: 0.29044. Regression loss: 0.00733. Total loss: 0.29776 Step: 587. Epoch: 20/50. Iteration: 28/28. Cls loss: 0.30086. Reg loss: 0.00810. Total loss: 0.30896: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 20/50. Classification loss: 0.27783. Regression loss: 0.00728. Total loss: 0.28511 Step: 615. Epoch: 21/50. Iteration: 28/28. Cls loss: 0.34610. Reg loss: 0.00809. Total loss: 0.35419: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 21/50. Classification loss: 0.26462. Regression loss: 0.00711. Total loss: 0.27173 Step: 643. Epoch: 22/50. Iteration: 28/28. Cls loss: 0.28175. Reg loss: 0.00807. Total loss: 0.28981: 100% 28/28 [00:46<00:00, 1.68s/it] Val. Epoch: 22/50. Classification loss: 0.25356. Regression loss: 0.00716. Total loss: 0.26071 Step: 671. Epoch: 23/50. Iteration: 28/28. Cls loss: 0.27373. Reg loss: 0.00875. Total loss: 0.28248: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 23/50. Classification loss: 0.24350. Regression loss: 0.00737. Total loss: 0.25087 Step: 699. Epoch: 24/50. Iteration: 28/28. Cls loss: 0.25727. Reg loss: 0.00815. Total loss: 0.26542: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 24/50. Classification loss: 0.23465. Regression loss: 0.00712. Total loss: 0.24177 Step: 727. Epoch: 25/50. Iteration: 28/28. Cls loss: 0.23017. Reg loss: 0.01109. Total loss: 0.24125: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 25/50. Classification loss: 0.22561. Regression loss: 0.00716. Total loss: 0.23277 Step: 755. Epoch: 26/50. Iteration: 28/28. Cls loss: 0.22237. Reg loss: 0.00591. Total loss: 0.22828: 100% 28/28 [00:46<00:00, 1.68s/it] Val. Epoch: 26/50. Classification loss: 0.21848. Regression loss: 0.00694. Total loss: 0.22542 Step: 783. Epoch: 27/50. Iteration: 28/28. Cls loss: 0.25054. Reg loss: 0.00917. 
Total loss: 0.25971: 100% 28/28 [00:46<00:00, 1.68s/it] Val. Epoch: 27/50. Classification loss: 0.21120. Regression loss: 0.00699. Total loss: 0.21819 Step: 811. Epoch: 28/50. Iteration: 28/28. Cls loss: 0.22907. Reg loss: 0.00829. Total loss: 0.23737: 100% 28/28 [00:47<00:00, 1.68s/it] Val. Epoch: 28/50. Classification loss: 0.20494. Regression loss: 0.00701. Total loss: 0.21195 Step: 839. Epoch: 29/50. Iteration: 28/28. Cls loss: 0.26674. Reg loss: 0.00852. Total loss: 0.27526: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 29/50. Classification loss: 0.19854. Regression loss: 0.00670. Total loss: 0.20523 Step: 867. Epoch: 30/50. Iteration: 28/28. Cls loss: 0.19063. Reg loss: 0.00593. Total loss: 0.19656: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 30/50. Classification loss: 0.19303. Regression loss: 0.00679. Total loss: 0.19982 Step: 895. Epoch: 31/50. Iteration: 28/28. Cls loss: 0.23191. Reg loss: 0.00678. Total loss: 0.23869: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 31/50. Classification loss: 0.18698. Regression loss: 0.00675. Total loss: 0.19373 Step: 923. Epoch: 32/50. Iteration: 28/28. Cls loss: 0.18452. Reg loss: 0.00685. Total loss: 0.19137: 100% 28/28 [00:46<00:00, 1.68s/it] Val. Epoch: 32/50. Classification loss: 0.18236. Regression loss: 0.00679. Total loss: 0.18915 Step: 951. Epoch: 33/50. Iteration: 28/28. Cls loss: 0.20275. Reg loss: 0.00758. Total loss: 0.21033: 100% 28/28 [00:47<00:00, 1.68s/it] Val. Epoch: 33/50. Classification loss: 0.17713. Regression loss: 0.00692. Total loss: 0.18405 Step: 979. Epoch: 34/50. Iteration: 28/28. Cls loss: 0.18318. Reg loss: 0.00577. Total loss: 0.18895: 100% 28/28 [00:47<00:00, 1.68s/it] Val. Epoch: 34/50. Classification loss: 0.17203. Regression loss: 0.00657. Total loss: 0.17860 Step: 999. Epoch: 35/50. Iteration: 20/28. Cls loss: 0.18499. Reg loss: 0.00838. Total loss: 0.19337: 68% 19/28 [00:37<00:10, 1.17s/it]checkpoint... Step: 1007. Epoch: 35/50. Iteration: 28/28. Cls loss: 0.18154. 
Reg loss: 0.00630. Total loss: 0.18784: 100% 28/28 [00:46<00:00, 1.68s/it] Val. Epoch: 35/50. Classification loss: 0.16700. Regression loss: 0.00666. Total loss: 0.17366 Step: 1035. Epoch: 36/50. Iteration: 28/28. Cls loss: 0.18250. Reg loss: 0.00611. Total loss: 0.18861: 100% 28/28 [00:47<00:00, 1.68s/it] Val. Epoch: 36/50. Classification loss: 0.16309. Regression loss: 0.00679. Total loss: 0.16989 Step: 1063. Epoch: 37/50. Iteration: 28/28. Cls loss: 0.15622. Reg loss: 0.00623. Total loss: 0.16245: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 37/50. Classification loss: 0.15933. Regression loss: 0.00666. Total loss: 0.16599 Step: 1091. Epoch: 38/50. Iteration: 28/28. Cls loss: 0.14960. Reg loss: 0.00556. Total loss: 0.15515: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 38/50. Classification loss: 0.15517. Regression loss: 0.00683. Total loss: 0.16201 Step: 1119. Epoch: 39/50. Iteration: 28/28. Cls loss: 0.17928. Reg loss: 0.00657. Total loss: 0.18585: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 39/50. Classification loss: 0.15171. Regression loss: 0.00657. Total loss: 0.15828 Step: 1147. Epoch: 40/50. Iteration: 28/28. Cls loss: 0.17436. Reg loss: 0.00468. Total loss: 0.17904: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 40/50. Classification loss: 0.14942. Regression loss: 0.00667. Total loss: 0.15609 Step: 1175. Epoch: 41/50. Iteration: 28/28. Cls loss: 0.16362. Reg loss: 0.00781. Total loss: 0.17143: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 41/50. Classification loss: 0.14597. Regression loss: 0.00686. Total loss: 0.15283 Step: 1203. Epoch: 42/50. Iteration: 28/28. Cls loss: 0.17241. Reg loss: 0.00837. Total loss: 0.18078: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 42/50. Classification loss: 0.14308. Regression loss: 0.00662. Total loss: 0.14969 Step: 1231. Epoch: 43/50. Iteration: 28/28. Cls loss: 0.17507. Reg loss: 0.00802. Total loss: 0.18309: 100% 28/28 [00:46<00:00, 1.68s/it] Val. Epoch: 43/50. Classification loss: 0.13933. 
Regression loss: 0.00666. Total loss: 0.14599 Step: 1259. Epoch: 44/50. Iteration: 28/28. Cls loss: 0.17234. Reg loss: 0.00580. Total loss: 0.17814: 100% 28/28 [00:46<00:00, 1.68s/it] Val. Epoch: 44/50. Classification loss: 0.13601. Regression loss: 0.00647. Total loss: 0.14247 Step: 1287. Epoch: 45/50. Iteration: 28/28. Cls loss: 0.16627. Reg loss: 0.00595. Total loss: 0.17222: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 45/50. Classification loss: 0.13402. Regression loss: 0.00653. Total loss: 0.14055 Step: 1315. Epoch: 46/50. Iteration: 28/28. Cls loss: 0.17035. Reg loss: 0.00682. Total loss: 0.17717: 100% 28/28 [00:46<00:00, 1.68s/it] Val. Epoch: 46/50. Classification loss: 0.13196. Regression loss: 0.00638. Total loss: 0.13834 Step: 1343. Epoch: 47/50. Iteration: 28/28. Cls loss: 0.12934. Reg loss: 0.00527. Total loss: 0.13461: 100% 28/28 [00:46<00:00, 1.68s/it] Val. Epoch: 47/50. Classification loss: 0.12878. Regression loss: 0.00664. Total loss: 0.13542 Step: 1371. Epoch: 48/50. Iteration: 28/28. Cls loss: 0.12199. Reg loss: 0.00390. Total loss: 0.12589: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 48/50. Classification loss: 0.12630. Regression loss: 0.00681. Total loss: 0.13311 Step: 1399. Epoch: 49/50. Iteration: 28/28. Cls loss: 0.13337. Reg loss: 0.00523. Total loss: 0.13859: 100% 28/28 [00:46<00:00, 1.67s/it] Val. Epoch: 49/50. Classification loss: 0.12423. Regression loss: 0.00635. Total loss: 0.13058 ###Markdown 3. Evaluation ###Code ! python coco_eval.py -c 0 -p shape -w logs/shape/efficientdet-d0_49_1400.pth ###Output running coco-style evaluation on project shape, weights logs/shape/efficientdet-d0_49_1400.pth... loading annotations into memory... Done (t=0.00s) creating index... index created! 100% 100/100 [00:08<00:00, 11.80it/s] Loading and preparing results... DONE (t=0.63s) creating index... index created! BBox Running per image evaluation... Evaluate annotation type *bbox* DONE (t=1.46s). Accumulating evaluation results... 
###Markdown 4. Visualize
###Code
# Run single-image inference with the fine-tuned EfficientDet-D0 checkpoint and
# draw the detected boxes/labels on the image with OpenCV, then display it with
# matplotlib.  Requires the project modules (backbone, efficientdet, utils) and
# the trained weights from the training step above.
import torch
from torch.backends import cudnn

from backbone import EfficientDetBackbone
import cv2
import matplotlib.pyplot as plt
import numpy as np

from efficientdet.utils import BBoxTransform, ClipBoxes
from utils.utils import preprocess, invert_affine, postprocess

compound_coef = 0
force_input_size = None  # set None to use default size
img_path = 'datasets/shape/val/999.jpg'

threshold = 0.2      # confidence threshold for keeping detections
iou_threshold = 0.2  # NMS IoU threshold

use_cuda = True
use_float16 = False
cudnn.fastest = True
cudnn.benchmark = True

obj_list = ['rectangle', 'circle']

# tf bilinear interpolation is different from any other's, just make do
input_sizes = [512, 640, 768, 896, 1024, 1280, 1280, 1536]
input_size = input_sizes[compound_coef] if force_input_size is None else force_input_size
ori_imgs, framed_imgs, framed_metas = preprocess(img_path, max_size=input_size)

if use_cuda:
    x = torch.stack([torch.from_numpy(fi).cuda() for fi in framed_imgs], 0)
else:
    x = torch.stack([torch.from_numpy(fi) for fi in framed_imgs], 0)

# NHWC -> NCHW and cast to the compute dtype for the network
x = x.to(torch.float32 if not use_float16 else torch.float16).permute(0, 3, 1, 2)

model = EfficientDetBackbone(compound_coef=compound_coef, num_classes=len(obj_list),
                             # replace this part with your project's anchor config
                             ratios=[(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)],
                             scales=[2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])
model.load_state_dict(torch.load('logs/shape/efficientdet-d0_49_1400.pth'))
model.requires_grad_(False)
model.eval()

if use_cuda:
    model = model.cuda()
if use_float16:
    model = model.half()

with torch.no_grad():
    features, regression, classification, anchors = model(x)

    regressBoxes = BBoxTransform()
    clipBoxes = ClipBoxes()

    out = postprocess(x, anchors, regression, classification,
                      regressBoxes, clipBoxes, threshold, iou_threshold)

# map boxes from the padded/resized frame back to original image coordinates
out = invert_affine(framed_metas, out)

for i in range(len(ori_imgs)):
    if len(out[i]['rois']) == 0:
        continue
    for j in range(len(out[i]['rois'])):
        # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # cast with the builtin int instead.
        (x1, y1, x2, y2) = out[i]['rois'][j].astype(int)
        cv2.rectangle(ori_imgs[i], (x1, y1), (x2, y2), (255, 255, 0), 2)
        obj = obj_list[out[i]['class_ids'][j]]
        score = float(out[i]['scores'][j])

        cv2.putText(ori_imgs[i], '{}, {:.3f}'.format(obj, score),
                    (x1, y1 + 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (255, 255, 0), 1)

    plt.imshow(ori_imgs[i])
get_effechecka_data.ipynb
###Markdown Lists of Species by Country This code uses the effechecka API to get a list of taxa that have been reported in each country. The API taxes a polygon (points are lat/lon coordinates) and returns observations within that polygon from several species occurrance databases. To use this notebook, you need a list of geonames ids and a json file with geonames polygons. In the cell below, we import the necessary libraries and data files. The input file, test_country.txt, contains the geonames ID for the country. There is only two in the file at any given time. This helps to avoid overloading the server. The code is written so that more countries can be included if in the future the server can handle more queries at once. The file low_res_countries.json are the polygons from geonames that have been reduced in resolution so they can fit in the URL API call. ###Code import urllib.request import urllib.error import json in_file = open('test_country.txt', 'r') shape_file = open('low_res_countries.json','r') shapes = json.load(shape_file) ###Output _____no_output_____ ###Markdown The code below takes the country-shaped polygon and forms the URL to query the API. Each query will take two hits. The first gets effechecka started on the query and the second (done a day later) will grab the results. If the query has been submitted before, then you will not need to do the second query. The json results returned by the API are written to the out_files. Each country has a separate out_file. 
###Code
# Query the effechecka checklist API for every country listed in
# test_country.txt.  Each input line is "country<TAB>iso<TAB>geonamesid".
# The matching polygon is looked up in low_res_countries.json (GeoJSON),
# encoded as a URL-escaped WKT string, submitted to the API, and the TSV
# result is saved to one out_file per country with a small metadata footer.
#
# NOTE(review): out_files has 3 slots, so test_country.txt must not list more
# than 3 countries at a time (the markdown above says 2); a longer input file
# would raise IndexError on out_files[index].

out_files = ['output1.tsv', 'output2.tsv', 'output3.tsv']

API_BASE = 'http://api.effechecka.org/checklist.tsv?traitSelector=&wktString='
# URL-escaped WKT fragments: %28 = '(', %29 = ')', %2C = ',', %20 = ' '
_MP_HEAD = 'GEOMETRYCOLLECTION%28POLYGON%20%28%28'
_MP_SEP = '%29%29%2CPOLYGON%20%28%28'
_MP_TAIL = '%29%29%29'


def ring_coords(ring):
    """Encode one polygon ring ([[lon, lat], ...]) as a URL-escaped WKT
    coordinate list: "lon lat, lon lat, ..." using %20 / %2C%20 escapes."""
    return '%2C%20'.join('%20'.join(str(c) for c in point) for point in ring)


def build_query(feature):
    """Return (url, wkt) for one GeoJSON feature.

    Handles both 'Polygon' and 'MultiPolygon' geometries.  Empty rings (left
    over from the polygon-simplification step) are skipped.

    BUGFIX: the original joined multipolygon parts by appending a separator
    after every ring and then removing the last one with str.strip(), which
    strips a *character set* rather than a suffix and could silently eat
    leading/trailing coordinate digits (e.g. a longitude starting with '2').
    Joining the rings with the separator avoids the problem entirely.
    """
    geometry = feature['geometry']
    if geometry['type'] == 'Polygon':
        wkt = 'POLYGON((' + ring_coords(geometry['coordinates'][0]) + '))'
    else:  # MultiPolygon
        rings = [ring_coords(part[0])
                 for part in geometry['coordinates'] if len(part[0]) > 0]
        wkt = _MP_HEAD + _MP_SEP.join(rings) + _MP_TAIL
    return API_BASE + wkt, wkt


def main():
    """Read the country list, build one API query per country, download the
    checklist, and append a provenance footer to each out_file."""
    with open('low_res_countries.json') as shape_file:
        shapes = json.load(shape_file)
    with open('test_country.txt') as in_file:
        for index, line in enumerate(in_file):
            row = line.strip().split('\t')
            country, iso, geonamesid = row[0], row[1], row[2]  # iso unused here
            print(geonamesid)  # progress marker: which country is being queried
            for feature in shapes['features']:
                # use the geonames id to find the right polygon in the shapes file
                if feature['properties']['geoNameId'] != geonamesid:
                    continue
                url, wkt = build_query(feature)
                print(url)
                try:
                    # Submit the query; effechecka may answer 503 on the first
                    # hit (it starts computing) -- rerun a day later for data.
                    urllib.request.urlretrieve(url, out_files[index])
                except urllib.error.URLError as e:
                    print(e.reason)
                # Append a metadata footer even when the download failed, so
                # the out_file records which country/polygon it belongs to.
                with open(out_files[index], 'a') as u:
                    u.write('\ncountry\t' + country + '\n')
                    u.write('country_uri\t' + geonamesid + '\n')
                    u.write('polygon\t' + wkt + '\n')
    print('complete')  # make sure the code gets to the end


if __name__ == '__main__':  # also True when run as a notebook cell
    main()
0a_Minimum_working_example.ipynb
###Markdown OUTDATED, the examples moved to the gallery See https://empymod.github.io/emg3d-gallery---- Minimum working exampleThis is a simple minimum working example to get started, along the lines of the one given in https://emg3d.readthedocs.io/en/stable/usage.htmlexample.To see some more realistic models have a look at the other notebooks in this repo.-------------------------------------------------------------------------------This notebooks uses `discretize` to create meshes easily and plot the model as well as the resulting electric field, which also requires `matplotlib`. If you are interested in a basic example that only requires `emg3d` here it is:```pyimport emg3dimport numpy as np Create a simple grid, 8 cells of length 1 in each direction, starting at the origin.grid = emg3d.utils.TensorMesh([np.ones(8), np.ones(8), np.ones(8)], x0=np.array([0, 0, 0])) The model is a fullspace with tri-axial anisotropy.model = emg3d.utils.Model(grid, res_x=1.5, res_y=1.8, res_z=3.3) The source is a x-directed, horizontal dipole at (4, 4, 4), frequency is 10 Hz.sfield = emg3d.utils.get_source_field(grid, src=[4, 4, 4, 0, 0], freq=10.0) Calculate the electric signal.efield = emg3d.solver.solver(grid, model, sfield, verb=3) Get the corresponding magnetic signal.hfield = emg3d.utils.get_h_field(grid, model, efield) ```-------------------------------------------------------------------------------**Requires**- **emg3d >= 0.9.1**- discretize, matplotlibFirst, we load `emg3d` and `discretize` (to create a mesh), along with `numpy` and `matplotlib`: ###Code import emg3d import discretize import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import LogNorm %matplotlib notebook plt.style.use('ggplot') ###Output _____no_output_____ ###Markdown 1. MeshFirst, we define the mesh (see `discretize.TensorMesh` for more info). In reality, this task requires some careful considerations. 
E.g., to avoid edge effects, the mesh should be large enough in order for the fields to dissipate, yet fine enough around source and receiver to accurately model them. This grid is too small, but serves as a minimal example. ###Code grid = discretize.TensorMesh( [[(25, 10, -1.04), (25, 28), (25, 10, 1.04)], [(50, 8, -1.03), (50, 16), (50, 8, 1.03)], [(30, 8, -1.05), (30, 16), (30, 8, 1.05)]], x0='CCC') grid ###Output _____no_output_____ ###Markdown 2. ModelNext we define a very simple fullspace model with $\rho_x=1.5\,\Omega\rm{m}$, $\rho_y=1.8\,\Omega\rm{m}$, and $\rho_z=3.3\,\Omega\rm{m}$. ###Code model = emg3d.utils.Model(grid, res_x=1.5, res_y=1.8, res_z=3.3) ###Output _____no_output_____ ###Markdown We can plot the model using `discretize`; in this case it is obviously rather a boring plot, as it shows a homogenous fullspace. ###Code grid.plot_3d_slicer(np.ones(grid.vnC)*model.res_x) # x-resistivity ###Output _____no_output_____ ###Markdown 3. Source fieldThe source is an x-directed dipole at the origin, with a 10 Hz signal of 1 A (`src` is defined either as `[x, y, z, dip, azimuth]` or `[x0, x1, y0, y1, z0, z1]`; the strength can be set via the `strength` parameter). ###Code sfield = emg3d.utils.get_source_field(grid, src=[0, 0, 0, 0, 0], freq=10) ###Output _____no_output_____ ###Markdown 4. Calculate the electric fieldNow we can calculate the electric field with `emg3d`: ###Code efield = emg3d.solver.solver(grid, model, sfield, verb=3) ###Output * WARNING :: ``emg3d.solver.solver()`` is renamed to ``emg3d.solve()``. Use the new ``emg3d.solve()``, as ``solver()`` will be removed in the future. :: emg3d START :: 07:31:17 :: v0.9.3.dev17 MG-cycle : 'F' sslsolver : False semicoarsening : False [0] tol : 1e-06 linerelaxation : False [0] maxit : 50 nu_{i,1,c,2} : 0, 2, 1, 2 verb : 3 Original grid : 48 x 32 x 32 => 49,152 cells Coarsest grid : 3 x 2 x 2 => 12 cells Coarsest level : 4 ; 4 ; 4 [hh:mm:ss] rel. error [abs. 
error, last/prev] l s h_ 2h_ \ / 4h_ \ /\ / 8h_ \ /\ / \ / 16h_ \/\/ \/ \/ [07:31:18] 2.623e-02 after 1 F-cycles [1.464e-06, 0.026] 0 0 [07:31:18] 2.253e-03 after 2 F-cycles [1.258e-07, 0.086] 0 0 [07:31:18] 3.051e-04 after 3 F-cycles [1.704e-08, 0.135] 0 0 [07:31:19] 5.500e-05 after 4 F-cycles [3.071e-09, 0.180] 0 0 [07:31:19] 1.170e-05 after 5 F-cycles [6.531e-10, 0.213] 0 0 [07:31:19] 2.745e-06 after 6 F-cycles [1.532e-10, 0.235] 0 0 [07:31:20] 6.873e-07 after 7 F-cycles [3.837e-11, 0.250] 0 0 > CONVERGED > MG cycles : 7 > Final rel. error : 6.873e-07 :: emg3d END :: 07:31:20 :: runtime = 0:00:02 ###Markdown The calculation requires in this case seven multigrid F-cycles and taken just a few seconds. It was able to coarsen in each dimension four times, where the input grid had 49,152 cells, and the coarsest grid had 12 cells. 5. Plot the resultWe can again utilize the in-built functions of a `discretize`-grid to plot, e.g., the x-directed electric field. ###Code grid.plot_3d_slicer(efield.fx.ravel('F'), view='abs', vType='Ex', pcolorOpts={'norm': LogNorm()}) emg3d.Report(discretize) ###Output _____no_output_____