path (string, lengths 7-265) | concatenated_notebook (string, lengths 46-17M) |
---|---|
jie/OceanSciences/DrifterParticleImage.ipynb | ###Markdown
Reproduce drifter
###Code
%matplotlib inline
from matplotlib import pylab
import matplotlib.pyplot as plt
import netCDF4 as nc
import numpy as np
import scipy.io
import datetime as dt
from salishsea_tools import nc_tools, viz_tools, tidetools, stormtools, bathy_tools,geo_tools
from __future__ import division
drifters = scipy.io.loadmat('/ocean/mhalvers/research/drifters/SoG_drifters.mat',squeeze_me=True)
ubc = drifters['ubc']
grid = nc.Dataset('/ocean/jieliu/research/meopar/nemo-forcing/grid/bathy_meter_SalishSea2.nc','r')
bathy = grid.variables['Bathymetry'][:, :]
X = grid.variables['nav_lon'][:, :]
Y = grid.variables['nav_lat'][:, :]
bathyy,X,Y = tidetools.get_SS2_bathy_data()
def convert_time(matlab_time_array):
"converts a matlab time array to python format"
python_time_array=[]
for t in matlab_time_array:
python_datetime = dt.datetime.fromordinal(int(t)) + dt.timedelta(days=t%1) - dt.timedelta(days = 366)
python_time_array.append(python_datetime)
python_time_array = np.array(python_time_array)
return python_time_array
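# Note: MATLAB datenums count days from the proleptic year 0000, which is 366
# days before Python's datetime ordinal origin (0001-01-01), hence the fixed
# 366-day offset subtracted above.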
def get_tracks(switch,lats,lons,ptime,in_water):
"""returns a list of tracks of each buoy,
i.e. a trajectory for each time the buoy was released into the water"""
all_tracks=[]
for ind in switch:
track_on = 1
i = ind
track ={'time':[], 'lat':[],'lon':[]}
while(track_on):
if in_water[i]!=1:
track_on=0
elif i==np.shape(in_water)[0]-1:
track['time'].append(ptime[i])
track['lat'].append(lats[i])
track['lon'].append(lons[i])
track_on=0
else:
track['time'].append(ptime[i])
track['lat'].append(lats[i])
track['lon'].append(lons[i])
i=i+1
all_tracks.append(track)
return all_tracks
def organize_info(buoy,btype):
""" organizes the buoy info. Groups the buoy data into tracks for when it was released into the water. """
#create arrays for easier access
buoy_name = btype[buoy][0]
lats = btype[buoy]['lat'].flatten()
lons = btype[buoy]['lon'].flatten()
mtime = btype[buoy]['mtime']
in_water = btype[buoy]['isSub'].flatten()
#convert mtime to python datetimes
ptime = convert_time(mtime)
#loop through in_water flag to find when buoy switched from being out of water to being in water.
switch = [];
for ind in np.arange(1,in_water.shape[0]):
if int(in_water[ind]) != int(in_water[ind-1]):
if int(in_water[ind])==1:
switch.append(ind)
all_tracks=get_tracks(switch,lats,lons,ptime.flatten(),in_water)
return buoy_name, all_tracks
def find_start(tracks, start_date):
"""returns the a list of indices for a track released on start date.
Only checks the month and day of the start day"""
i=0
ind=[]
starttimes=[]
for t in tracks:
if int(t['time'][0].month) == start_date.month:
if int(t['time'][0].day) == start_date.day:
ind.append(i)
i=i+1
return ind
def plot_buoy(tracks, startdate, i=0, fancy=False):
""" plots a buoy trajectory at the given startdate in an axis, ax.
returns the trajectory that was plotted.
The first track released on the startdate is plotted.
For trajectories that were released mulitples times a day, i selects which release is plotted.
"""
fig,ax = plt.subplots(1,1,figsize=(6,6))
ind =find_start(tracks,startdate)
traj=tracks[ind[i]]
duration = (traj['time'][-1]-traj['time'][0]).total_seconds()/3600
print ('Released', traj['time'][0], 'at', traj['lat'][0], ',' , traj['lon'][0], 'for' , duration, 'hours')
ax.plot(traj['lon'],traj['lat'],'og',label = 'data')
#ax.legend(loc='best')
ax.plot(traj['lon'][0],traj['lat'][0],'sr')
print(float(traj['lon'][0]))
#[j,i]=geo_tools.find_closest_model_point(float(traj['lon'][0]),float(traj['lat'][0]),X,Y,land_mask=bathyy.mask())
ax.plot(-123-np.array([18.2, 13.7, 12])/60.,49+np.array([6.4, 8, 7.6])/60.,'-k',lw=2);
if fancy:
cmap = plt.get_cmap('winter_r')
cmap.set_bad('burlywood')
ax.pcolormesh(X, Y, bathy, cmap=cmap)
ax.set_title('Observed Drift Track')
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
ax.text(-123.15,49.13, "Fraser River", fontsize=12)
else:
viz_tools.plot_coastline(ax, grid, coords='map')
viz_tools.plot_coastline(ax, grid, coords='map',isobath=4)
viz_tools.plot_coastline(ax, grid, coords='map',isobath=20)
#print ('NEMO coords:', j,i)
ax.set_xlim([-123.6,-123])
ax.set_ylim([48.8,49.4])
ax.set_xticks([-123.6, -123.4, -123.2,-123])
ax.set_xticklabels([-123.6, -123.4, -123.2,-123])
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
ax.legend(loc = 'best')
return fig
buoy = 2
name, tracks=organize_info(buoy,ubc)
print(name)
fig=plot_buoy(tracks,dt.datetime(2014,10,8), i=-1)
###Output
UBC-I-0003
Released 2014-10-08 16:10:07.000003 at 49.1054983333 , -123.319833333 for 28.750277778333334 hours
-123.31983333333334
###Markdown
Produce both together
###Code
def plot_both(tracks, startdate, lon,lat,part,start,end,start_d,end_d,day,hour,minute,duration,i=0, fancy=False):
""" plots a buoy trajectory at the given startdate in an axis, ax.
returns the trajectory that was plotted.
The first track released on the startdate is plotted.
For trajectories that were released mulitples times a day, i selects which release is plotted.
"""
fig,axs = plt.subplots(1,2,figsize=(12,6))
ax = axs[0]
ind =find_start(tracks,startdate)
traj=tracks[ind[i]]
duration = (traj['time'][-1]-traj['time'][0]).total_seconds()/3600
print ('Released', traj['time'][0], 'at', traj['lat'][0], ',' , traj['lon'][0], 'for' , duration, 'hours')
ax.plot(traj['lon'],traj['lat'],'og',label = 'data')
#ax.legend(loc='best')
ax.plot(traj['lon'][0],traj['lat'][0],'sr')
#[j,i]=tidetools.find_closest_model_point(float(traj['lon'][0]),float(traj['lat'][0]),X,Y,bathy,\
#lon_tol=0.0052,lat_tol=0.00210, allow_land=False)
ax.plot(-123-np.array([18.2, 13.7, 12])/60.,49+np.array([6.4, 8, 7.6])/60.,'-k',lw=2);
if fancy:
cmap = plt.get_cmap('winter_r')
cmap.set_bad('burlywood')
ax.pcolormesh(X, Y, bathy, cmap=cmap)
ax.set_title('Observed Drift Track')
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
ax.text(-123.15,49.13, "Fraser River", fontsize=12)
else:
viz_tools.plot_coastline(ax, grid, coords='map')
viz_tools.plot_coastline(ax, grid, coords='map',isobath=4)
viz_tools.plot_coastline(ax, grid, coords='map',isobath=20)
#print ('NEMO coords:', j,i)
ax = axs[1]
viz_tools.plot_coastline(ax,grid,coords='map')
viz_tools.plot_coastline(ax,grid,coords='map',isobath=4)
viz_tools.plot_coastline(ax,grid,coords='map',isobath=20)
colors=['DodgerBlue']
for i, key in enumerate(lon.keys()):
ax.scatter(lon[key][1:,part],lat[key][1:,part],marker='o',color=colors[i],label=key)
ax.plot(lon[key][0,part],lat[key][0,part],'sr')
ax.plot(-123-np.array([18.2,13.7,12])/60.,49+np.array([6.4,8,7.6])/60.,'-k',lw=2,color='SpringGreen')
for ax in axs:
ax.set_xlim([-123.6,-123]); ax.set_ylim([48.8,49.4])
ax.set_xticks([-123.6, -123.4, -123.2,-123])
ax.set_xticklabels([-123.6, -123.4, -123.2,-123])
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
ax.legend(loc = 'best')
return fig
# Load the Ariane particle-tracking results before plotting them alongside the drifter data
lon112={};lat112={}
o112 = nc.Dataset('/ocean/jieliu/research/meopar/Ariane/result/oct8_101e061e05/drop1/12/ariane_trajectories_qualitative.nc','r')
lon112['model']=o112.variables['traj_lon']
lat112['model']=o112.variables['traj_lat']
buoy = 2
name, tracks=organize_info(buoy,ubc)
fig = plot_both(tracks,dt.datetime(2014,10,8),lon112,lat112,0,'7-Oct-2014','11-Oct-2014',8,10,8,16,0,29,i=-1)
###Output
Released 2014-10-08 16:10:07.000003 at 49.1054983333 , -123.319833333 for 28.750277778333334 hours
NEMO coords: 429 290
###Markdown
Reproduce drifter
###Code
%matplotlib inline
from matplotlib import pylab
import matplotlib as mpl
import matplotlib.pyplot as plt
import netCDF4 as nc
import numpy as np
import scipy.io
import datetime as dt
from salishsea_tools import nc_tools, viz_tools, tidetools, stormtools, bathy_tools
from __future__ import division
drifters = scipy.io.loadmat('/ocean/mhalvers/research/drifters/SoG_drifters.mat',squeeze_me=True)
ubc = drifters['ubc']
grid = nc.Dataset('/ocean/jieliu/research/meopar/nemo-forcing/grid/bathy_meter_SalishSea2.nc','r')
bathy = grid.variables['Bathymetry'][:, :]
X = grid.variables['nav_lon'][:, :]
Y = grid.variables['nav_lat'][:, :]
def convert_time(matlab_time_array):
"converts a matlab time array to python format"
python_time_array=[]
for t in matlab_time_array:
python_datetime = dt.datetime.fromordinal(int(t)) + dt.timedelta(days=t%1) - dt.timedelta(days = 366)
python_time_array.append(python_datetime)
python_time_array = np.array(python_time_array)
return python_time_array
def get_tracks(switch,lats,lons,ptime,in_water):
"""returns a list of tracks of each buoy,
i.e. a trajectory for each time the buoy was released into the water"""
all_tracks=[]
for ind in switch:
track_on = 1
i = ind
track ={'time':[], 'lat':[],'lon':[]}
while(track_on):
if in_water[i]!=1:
track_on=0
elif i==np.shape(in_water)[0]-1:
track['time'].append(ptime[i])
track['lat'].append(lats[i])
track['lon'].append(lons[i])
track_on=0
else:
track['time'].append(ptime[i])
track['lat'].append(lats[i])
track['lon'].append(lons[i])
i=i+1
all_tracks.append(track)
return all_tracks
def organize_info(buoy,btype):
""" organizes the buoy info. Groups the buoy data into tracks for when it was released into the water. """
#create arrays for easier access
buoy_name = btype[buoy][0]
lats = btype[buoy]['lat'].flatten()
lons = btype[buoy]['lon'].flatten()
mtime = btype[buoy]['mtime']
in_water = btype[buoy]['isSub'].flatten()
#convert mtime to python datetimes
ptime = convert_time(mtime)
#loop through in_water flag to find when buoy switched from being out of water to being in water.
switch = [];
for ind in np.arange(1,in_water.shape[0]):
if int(in_water[ind]) != int(in_water[ind-1]):
if int(in_water[ind])==1:
switch.append(ind)
all_tracks=get_tracks(switch,lats,lons,ptime.flatten(),in_water)
return buoy_name, all_tracks
def find_start(tracks, start_date):
"""returns the a list of indices for a track released on start date.
Only checks the month and day of the start day"""
i=0
ind=[]
starttimes=[]
for t in tracks:
if int(t['time'][0].month) == start_date.month:
if int(t['time'][0].day) == start_date.day:
ind.append(i)
i=i+1
return ind
def plot_buoy(tracks, startdate, i=0, fancy=False):
""" plots a buoy trajectory at the given startdate in an axis, ax.
returns the trajectory that was plotted.
The first track released on the startdate is plotted.
For trajectories that were released mulitples times a day, i selects which release is plotted.
"""
fig,ax = plt.subplots(1,1,figsize=(6,6))
ind =find_start(tracks,startdate)
traj=tracks[ind[i]]
duration = (traj['time'][-1]-traj['time'][0]).total_seconds()/3600
print ('Released', traj['time'][0], 'at', traj['lat'][0], ',' , traj['lon'][0], 'for' , duration, 'hours')
ax.plot(traj['lon'],traj['lat'],'og')
#ax.legend(loc='best')
ax.plot(traj['lon'][0],traj['lat'][0],'sr')
[j,i]=tidetools.find_closest_model_point(float(traj['lon'][0]),float(traj['lat'][0]),X,Y,bathy,\
lon_tol=0.0052,lat_tol=0.00210, allow_land=False)
ax.plot(-123-np.array([18.2, 13.7, 12])/60.,49+np.array([6.4, 8, 7.6])/60.,'-k',lw=2);
if fancy:
cmap = plt.get_cmap('winter_r')
cmap.set_bad('burlywood')
ax.pcolormesh(X, Y, bathy, cmap=cmap)
ax.set_title('Observed Drift Track')
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
ax.text(-123.15,49.13, "Fraser River", fontsize=12)
else:
viz_tools.plot_coastline(ax, grid, coords='map')
viz_tools.plot_coastline(ax, grid, coords='map',isobath=4)
viz_tools.plot_coastline(ax, grid, coords='map',isobath=20)
print ('NEMO coords:', j,i)
ax.set_xlim([-123.6,-123])
ax.set_ylim([48.8,49.4])
ax.set_xticks([-123.6, -123.4, -123.2,-123])
ax.set_xticklabels([-123.6, -123.4, -123.2,-123])
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
return fig
mpl.rcParams.update({'font.size': 14})
mpl.rcParams["axes.formatter.useoffset"] = False
buoy = 2
name, tracks=organize_info(buoy,ubc)
print(name)
fig=plot_buoy(tracks,dt.datetime(2014,10,8), i=-1)
###Output
UBC-I-0003
Released 2014-10-08 16:10:07.000003 at 49.1054983333 , -123.319833333 for 28.750277778333334 hours
NEMO coords: 429 290
###Markdown
Produce both together
###Code
def plot_both(tracks, startdate, lon,lat,part,start,end,start_d,end_d,day,hour,minute,duration,i=0, fancy=False):
""" plots a buoy trajectory at the given startdate in an axis, ax.
returns the trajectory that was plotted.
The first track released on the startdate is plotted.
For trajectories that were released mulitples times a day, i selects which release is plotted.
"""
fig,axs = plt.subplots(1,2,figsize=(12,6))
ax = axs[0]
ind =find_start(tracks,startdate)
traj=tracks[ind[i]]
duration = (traj['time'][-1]-traj['time'][0]).total_seconds()/3600
print ('Released', traj['time'][0], 'at', traj['lat'][0], ',' , traj['lon'][0], 'for' , duration, 'hours')
ax.plot(traj['lon'],traj['lat'],'og',label = 'data')
#ax.legend(loc='best')
ax.plot(traj['lon'][0],traj['lat'][0],'sr')
[j,i]=tidetools.find_closest_model_point(float(traj['lon'][0]),float(traj['lat'][0]),X,Y,bathy,\
lon_tol=0.0052,lat_tol=0.00210, allow_land=False)
ax.plot(-123-np.array([18.2, 13.7, 12])/60.,49+np.array([6.4, 8, 7.6])/60.,'-k',lw=2);
if fancy:
cmap = plt.get_cmap('winter_r')
cmap.set_bad('burlywood')
ax.pcolormesh(X, Y, bathy, cmap=cmap)
ax.set_title('Observed Drift Track')
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
ax.text(-123.15,49.13, "Fraser River", fontsize=12)
else:
viz_tools.plot_coastline(ax, grid, coords='map')
viz_tools.plot_coastline(ax, grid, coords='map',isobath=4)
viz_tools.plot_coastline(ax, grid, coords='map',isobath=20)
print ('NEMO coords:', j,i)
ax = axs[1]
viz_tools.plot_coastline(ax,grid,coords='map')
viz_tools.plot_coastline(ax,grid,coords='map',isobath=4,color='DarkViolet')
viz_tools.plot_coastline(ax,grid,coords='map',isobath=20,color='OrangeRed')
colors=['DodgerBlue']
for i, key in enumerate(lon.keys()):
ax.scatter(lon[key][1:,part],lat[key][1:,part],marker='o',color=colors[i],label=key)
ax.scatter(lon[key][0,part],lat[key][0,part],color='0.30',marker='s')
ax.plot(-123-np.array([18.2,13.7,12])/60.,49+np.array([6.4,8,7.6])/60.,'-k',lw=2,color='SpringGreen')
for ax in axs:
ax.set_xlim([-123.6,-123]); ax.set_ylim([48.8,49.4])
ax.set_xticks([-123.6, -123.4, -123.2,-123])
ax.set_xticklabels([-123.6, -123.4, -123.2,-123])
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
ax.legend(loc = 2)
return fig
lon112={};lat112={}
o112 = nc.Dataset('/ocean/jieliu/research/meopar/Ariane/result/oct8_101e061e05/drop1/12/ariane_trajectories_qualitative.nc','r')
lon112['model']=o112.variables['traj_lon']
lat112['model']=o112.variables['traj_lat']
mpl.rcParams.update({'font.size': 14})
mpl.rcParams["axes.formatter.useoffset"] = False
buoy = 2
name, tracks=organize_info(buoy,ubc)
fig = plot_both(tracks,dt.datetime(2014,10,8),lon112,lat112,0,'7-Oct-2014','11-Oct-2014',8,10,8,16,0,29,i=-1)
###Output
Released 2014-10-08 16:10:07.000003 at 49.1054983333 , -123.319833333 for 28.750277778333334 hours
NEMO coords: 429 290
|
Lessons/Pyspark_Notebooks/Pyspark_array_manipulation_2.ipynb | ###Markdown
Difference Between map and flatmap
###Code
values = sc.parallelize([1, 2, 3, 4], 2)
print(values.map(range).collect())
# [[0], [0, 1], [0, 1, 2], [0, 1, 2, 3]]
print(values.flatMap(range).collect())
# [0, 0, 1, 0, 1, 2, 0, 1, 2, 3]
###Output
[range(0, 1), range(0, 2), range(0, 3), range(0, 4)]
[0, 0, 1, 0, 1, 2, 0, 1, 2, 3]
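The same distinction is easy to see with an explicit lambda. A small illustrative sketch (it assumes the same SparkContext `sc` used above; the sample strings are made up):
```python
lines = sc.parallelize(["to be", "or not"])
print(lines.map(lambda s: s.split()).collect())      # [['to', 'be'], ['or', 'not']]
print(lines.flatMap(lambda s: s.split()).collect())  # ['to', 'be', 'or', 'not']
```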
###Markdown
Converting from Pyspark to a conventional Python function. Keep in mind, the Pyspark and MapReduce approaches are still faster on large datasets!
###Code
import math
def big_sum(ls):
'''Same as above, example 1'''
ls = [x*2 for x in ls if (x*2)%4 == 0]
summation = sum(ls)
return math.sqrt(summation)
big_sum(range(100_000))
def f(ls):
'''same as above, example 2'''
s = 0
for i in ls:
# only the values whose square is divisible by 4, gets added
if (i*2)%4 == 0:
s += (i*2)
return math.sqrt(s)
print(f(range(100000)))
###Output
70709.97100833799
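For comparison, a minimal sketch of the PySpark pipeline that these plain-Python functions mirror (an assumption of what "above" refers to; it presumes the same SparkContext `sc` and yields the same result for `range(100_000)`):
```python
import math

rdd = sc.parallelize(range(100_000))
summation = (rdd.map(lambda x: x * 2)          # double every value
                .filter(lambda x: x % 4 == 0)  # keep only multiples of 4
                .sum())                        # reduce to a single total
print(math.sqrt(summation))  # ~70709.97, matching the output above
```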
###Markdown
Difference Between map and flatmap
###Code
values = sc.parallelize([1, 2, 3, 4], 2)
print(values.map(range).collect()) # ranges an RDD as a 2D list, and in this case what goes in each is a range object, after each value in original is given to the range function
# [[0], [0, 1], [0, 1, 2], [0, 1, 2, 3]]
print(values.flatMap(range).collect()) # flattens the data to a 1D list
# [0, 0, 1, 0, 1, 2, 0, 1, 2, 3]
###Output
[range(0, 1), range(0, 2), range(0, 3), range(0, 4)]
[0, 0, 1, 0, 1, 2, 0, 1, 2, 3]
|
data_modelling/01_indroduction_to_data_modelling/exercises/L1_Exercise_2_Creating_a_Table_with_Apache_Cassandra.ipynb | ###Markdown
Lesson 1 Exercise 2: Creating a Table with Apache Cassandra Walk through the basics of Apache Cassandra. Complete the following tasks: Create a table in Apache Cassandra, Insert rows of data, Run a simple SQL query to validate the information. `` denotes where the code needs to be completed. Note: __Do not__ click the blue Preview button in the lower taskbar Import Apache Cassandra python package
###Code
import cassandra
###Output
_____no_output_____
###Markdown
Create a connection to the database
###Code
from cassandra.cluster import Cluster
try:
cluster = Cluster(['127.0.0.1']) #If you have a locally installed Apache Cassandra instance
session = cluster.connect()
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
TO-DO: Create a keyspace to do the work in
###Code
## TO-DO: Create the keyspace
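## Note: SimpleStrategy with replication_factor 1 is only suitable for a
## single-node development setup such as this local instance.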
try:
session.execute("""
CREATE KEYSPACE IF NOT EXISTS udacity
WITH REPLICATION =
{ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"""
)
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
TO-DO: Connect to the Keyspace
###Code
## To-Do: Add in the keyspace you created
try:
session.set_keyspace('udacity')
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
Create a Song Library that contains a list of songs, including the song name, artist name, year, album it was from, and if it was a single: `song_title`, `artist_name`, `year`, `album_name`, `single`. TO-DO: You need to create a table to be able to run the following query: `select * from songs WHERE year=1970 AND artist_name="The Beatles"`
###Code
## TO-DO: Complete the query below
query = "CREATE TABLE IF NOT EXISTS music_library "
query = query + "(song_title text, artist_name text, year int, album_name text, single text, PRIMARY KEY (year, artist_name))"
try:
session.execute(query)
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
TO-DO: Insert the following two rows in your table. `First Row: "Across The Universe", "The Beatles", "1970", "False", "Let It Be"` `Second Row: "The Beatles", "Think For Yourself", "False", "1965", "Rubber Soul"`
###Code
## Add in query and then run the insert statement
query = "INSERT INTO music_library (song_title, artist_name, year, single, album_name)"
query = query + " VALUES (%s, %s, %s, %s, %s)"
try:
session.execute(query, ("Across The Universe", "The Beatles", 1970, "False", "Let It Be"))
except Exception as e:
print(e)
try:
session.execute(query, ("Think For Yourself", "The Beatles", 1965, "False", "Rubber Soul"))
except Exception as e:
print(e)
###Output
_____no_output_____
###Markdown
TO-DO: Validate your data was inserted into the table.
###Code
## TO-DO: Complete and then run the select statement to validate the data was inserted into the table
query = 'SELECT * FROM music_library'
try:
rows = session.execute(query)
except Exception as e:
print(e)
for row in rows:
print (row.year, row.album_name, row.artist_name)
###Output
1965 Rubber Soul The Beatles
1970 Let It Be The Beatles
###Markdown
TO-DO: Validate the Data Model with the original query.`select * from songs WHERE YEAR=1970 AND artist_name="The Beatles"`
###Code
##TO-DO: Complete the select statement to run the query
query = "drop table music_library"
try:
rows = session.execute(query)
except Exception as e:
print(e)
for row in rows:
print (row.year, row.album_name, row.artist_name)
###Output
_____no_output_____
###Markdown
And finally, close the session and cluster connection
###Code
session.shutdown()
cluster.shutdown()
###Output
_____no_output_____ |
docs/writecustom.ipynb | ###Markdown
Writing Custom Models Writing custom theoretical models is a powerful, extensible option of the LamAna package. Authoring Custom ModelsCustom models are simple `.py` files that can be locally placed by the user into the models directory. The API finds these selected files from the `apply(model='')` method in the `distributions.Case` class. In order for these processes to work smoothly, the following essentials are needed to "handshake" with `theories` module. 1. Implement a `_use_model_()` hook that returns (at minimum) an updated DataFrame.1. If using the class-style models, implement `_use_model_()` hook within a class that inherits from `theories.BaseModel`.Exceptions for specific models are maintained by the models author. Which style do I implement?- For beginners, function-style models are the best way to start making custom models.- We recommend class-style models, which use object-oriented principles such as inheritance. This is best suited for intermediate Pythonistas, which we encourage everyone to consider acheiving. :) Examples of both function-style and class-style models are found in the ["examples"](https://github.com/par2/lamana/tree/develop/examples) folder of the repository.The following cell shows an excerpt of the class-style model. ```python------------------------------------------------------------------------------ Class-style model ...class Model(BaseModel): '''A custom CLT model. A modified laminate theory for circular biaxial flexure disks, loaded with a flat piston punch on 3-ball support having two distinct materials (polymer and ceramic). ''' def __init__(self): self.Laminate = None self.FeatureInput = None self.LaminateModel = None def _use_model_(self, Laminate, adjusted_z=False): '''Return updated DataFrame and FeatureInput. ... Returns ------- tuple The updated calculations and parameters stored in a tuple `(LaminateModel, FeatureInput)``. df : DataFrame LaminateModel with IDs and Dimensional Variables. FeatureInut : dict Geometry, laminate parameters and more. Updates Globals dict for parameters in the dashboard output. ''' ... return (df, FeatureInput) Add Defaults here```
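For contrast, a function-style model boils down to a module-level `_use_model_()` hook. The sketch below is illustrative only; the argument list, attribute names and calculations are assumptions rather than LamAna's actual API:
```python
# -----------------------------------------------------------------------------
# Function-style model (illustrative sketch; names and attributes are assumed)
def _use_model_(Laminate, adjusted_z=False):
    '''Return an updated DataFrame, per the hook contract described above.'''
    df = Laminate.LFrame.copy()    # assumed attribute holding the laminate DataFrame
    # ... placeholder for the custom laminate-theory calculations ...
    return df
```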
###Code
.. note::
DEV: If testing with both function- and class-styles, keep in mind any changes to the model should be reflected in both styles.
###Output
_____no_output_____
###Markdown
What are `Defaults`? Recall there are a set of **geometric, loading and material parameters** that are required to run LT calculations. During analysis, retyping these parameters may become tedious each time you wish to run a simple plot or test parallel case. Therefore, you can prepare variables that store default parameters with specific values.LamAna eases this process by simply inheriting from `BaseDefaults`. The `BaseDefaults` class stores a number of common *geometry strings*, *Geometry objects* and arbitrary *loading parameters*/*material properties*. These values are intended to get you started, but you can alter to fit your better suit model. In addition, this class has methods for easily building formatted *FeatureInput* objects. ```pythonclass Defaults(BaseDefaults): '''Return parameters for building distributions cases. Useful for consistent testing. Dimensional defaults are inherited from utils.BaseDefaults(). Material-specific parameters are defined here by he user. - Default geometric parameters - Default material properties - Default FeatureInput Examples ======== >>> dft = Defaults() >>> dft.load_params {'R' : 12e-3, 'a' : 7.5e-3, 'p' : 1, 'P_a' : 1, 'r' : 2e-4,} >>> dft.mat_props {'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9}, 'Poissons': {'HA': 0.25, 'PSu': 0.33}} >>> dft.FeatureInput {'Geometry' : '400-[200]-800', 'Geometric' : {'R' : 12e-3, 'a' : 7.5e-3, 'p' : 1, 'P_a' : 1, 'r' : 2e-4,}, 'Materials' : {'HA' : [5.2e10, 0.25], 'PSu' : [2.7e9, 0.33],}, 'Custom' : None, 'Model' : Wilson_LT} ''' def __init__(self): BaseDefaults.__init__(self) '''DEV: Add defaults first. Then adjust attributes.''' DEFAULTS ------------------------------------------------------------ Build dicts of geometric and material parameters self.load_params = { 'R': 12e-3, specimen radius 'a': 7.5e-3, support ring radius 'p': 5, points/layer 'P_a': 1, applied load 'r': 2e-4, radial distance from center loading } self.mat_props = { 'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9}, 'Poissons': {'HA': 0.25, 'PSu': 0.33} } ATTRIBUTES ---------------------------------------------------------- FeatureInput self.FeatureInput = self.get_FeatureInput( self.Geo_objects['standard'][0], load_params=self.load_params, mat_props=self.mat_props, model='Wilson_LT', global_vars=None )```
###Code
.. seealso::
The latter guidelines are used for authoring custom models on your local machine. If you would like to share your model, see the `Contributions: As an Author <contribution>`_ section for more details.
###Output
_____no_output_____ |
example/Example-a3667-Chandra.ipynb | ###Markdown
Load data. Initialize an `ObservationList` object.
###Code
observations = sbfit.ObservationList()
###Output
_____no_output_____
###Markdown
Load images into the `ObservationList` object.
###Code
image_dir = "a3667/chandra"
obsids = [513, 889, 5751, 5752, 5753, 6292, 6295, 6296]
for obsid in obsids:
observations.add_observation_from_file(f"a3667/chandra/{obsid}_band1_thresh.img",
f"a3667/chandra/{obsid}_band1_thresh.expmap",
f"a3667/chandra/{obsid}_band1_nxb_full.img",
bkg_norm_type="count",
bkg_norm_keyword="bkgnorm", )
###Output
_____no_output_____
###Markdown
Read region file
###Code
epanda = sbfit.read_region("a3667.reg")
###Output
_____no_output_____
###Markdown
Extract a profile. The region set loaded in the previous step is used. The `channel_width` is the size of the radius grid for a profile; the value should be less than the PSF width.
###Code
a3667_chandra_profile = observations.get_profile(epanda, channel_width=0.5)
###Output
WARNING: FITSFixedWarning: RADECSYS= 'ICRS ' / default
the RADECSYS keyword is deprecated, use RADESYSa. [astropy.wcs.wcs]
WARNING: FITSFixedWarning: 'datfix' made the change 'Set DATEREF to '1998-01-01' from MJDREF.
Set MJD-END to 51444.513808 from DATE-END'. [astropy.wcs.wcs]
WARNING: FITSFixedWarning: 'datfix' made the change 'Set DATEREF to '1998-01-01' from MJDREF.
Set MJD-END to 51797.187928 from DATE-END'. [astropy.wcs.wcs]
WARNING: FITSFixedWarning: 'datfix' made the change 'Set DATEREF to '1998-01-01' from MJDREF.
Set MJD-END to 53529.557755 from DATE-END'. [astropy.wcs.wcs]
WARNING: FITSFixedWarning: 'datfix' made the change 'Set DATEREF to '1998-01-01' from MJDREF.
Set MJD-END to 53534.009225 from DATE-END'. [astropy.wcs.wcs]
WARNING: FITSFixedWarning: 'datfix' made the change 'Set DATEREF to '1998-01-01' from MJDREF.
Set MJD-END to 53539.451968 from DATE-END'. [astropy.wcs.wcs]
WARNING: FITSFixedWarning: 'datfix' made the change 'Set DATEREF to '1998-01-01' from MJDREF.
Set MJD-END to 53532.193692 from DATE-END'. [astropy.wcs.wcs]
WARNING: FITSFixedWarning: 'datfix' made the change 'Set DATEREF to '1998-01-01' from MJDREF.
Set MJD-END to 53536.858264 from DATE-END'. [astropy.wcs.wcs]
WARNING: FITSFixedWarning: 'datfix' made the change 'Set DATEREF to '1998-01-01' from MJDREF.
Set MJD-END to 53541.426528 from DATE-END'. [astropy.wcs.wcs]
###Markdown
First, let's bin the profile.
###Code
a3667_chandra_profile.rebin(130, 250,method="lin",min_cts=200,log_width=0.005,lin_width=1)
a3667_chandra_profile.plot(scale="loglog")
###Output
/Users/xyzhang/anaconda3/envs/SBFit/lib/python3.7/site-packages/sbfit-0.1.2-py3.7.egg/sbfit/profile.py:506: UserWarning: No model set.
###Markdown
- For *Chandra* observations, the PSF is small enough to use an identity smoothing matrix, which is the default setting.
- For *XMM-Newton*, a King-profile smoothing matrix is essential to account for the broad PSF. The parameters of the profile are provided in the PSF calibration files.
###Code
a3667_chandra_profile.set_smooth_matrix("identity", king_alpha=1.4,king_rc=10,sigma=1)
###Output
_____no_output_____
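For intuition, the King profile behind such a smoothing matrix is just an analytic weight in radius. A plain-numpy sketch (the parameter names mirror `king_rc` and `king_alpha` above; the normalisation is omitted):
```python
import numpy as np

def king_profile(r, rc=10.0, alpha=1.4):
    """Un-normalised King PSF weight at radius r."""
    return (1.0 + (r / rc) ** 2) ** (-alpha)

print(king_profile(np.linspace(0, 50, 6)))  # weights fall off smoothly with radius
```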
###Markdown
Set models. Set a double power law model and load it into the profile. The `+` operator can add multiple model instances into a compound model instance. Here we use the combination of a `DoublePowerLaw` model and a `Constant` model, which represent the ICM emission and the X-ray sky background, respectively.
###Code
dpl = sbfit.model.DoublePowerLaw()
cst = sbfit.model.Constant()
a3667_chandra_profile.set_model(dpl + cst)
print(a3667_chandra_profile.model)
###Output
Model: CompoundModel
Inputs: ('x',)
Outputs: ('y',)
Model set size: 1
Expression: [0] + [1]
Components:
[0]: <DoublePowerLaw(norm=1., a1=0.1, a2=1., r=1., c=2.)>
[1]: <Constant(norm=0.)>
Parameters:
norm_0 a1_0 a2_0 r_0 c_0 norm_1
------ ---- ---- --- --- ------
1.0 0.1 1.0 1.0 2.0 0.0
###Markdown
Before fitting, set initial parameters that bring the model profile close to the observed profile.
###Code
a3667_chandra_profile.model.norm_0 = 4e-4
a3667_chandra_profile.model.a1_0 = 0
a3667_chandra_profile.model.a2_0 = 0.6
a3667_chandra_profile.model.r_0 = 191
a3667_chandra_profile.model.c_0 = 2.4
a3667_chandra_profile.model.norm_1 = 5e-7
a3667_chandra_profile.calculate()
a3667_chandra_profile.plot()
###Output
_____no_output_____
###Markdown
Set parameter constraints for the model in case the optimizer goes too far.
###Code
a3667_chandra_profile.model.norm_1.fixed = True
a3667_chandra_profile.model.norm_0.bounds = (1e-4, 6e-4)
a3667_chandra_profile.model.a1_0.bounds = (-0.7, 0.3)
a3667_chandra_profile.model.a2_0.bounds = (0.4, 1.1)
a3667_chandra_profile.model.r_0.bounds = (150, 220)
a3667_chandra_profile.model.c_0.bounds = (1.8, 3.3)
###Output
_____no_output_____
###Markdown
Fit
###Code
a3667_chandra_profile.fit(show_step=True, tolerance=0.01)
p_bin2 = a3667_chandra_profile.deepcopy()
p_bin2.rebin(130, 250,method="lin",min_cts=200,log_width=0.005,lin_width=2)
p_bin2.fit(show_step=True)
###Output
Start fit
C-stat: 68.426
[3.98619753e-04 7.83445097e-02 5.39570169e-01 1.91250219e+02
2.47354035e+00]
C-stat: 68.405
[3.97761118e-04 7.71829563e-02 5.38463770e-01 1.91285037e+02
2.47927233e+00]
C-stat: 68.405
[3.97719203e-04 7.71599546e-02 5.38401152e-01 1.91286173e+02
2.47954135e+00]
Iteration terminated.
Degree of freedom: 55; C-stat: 68.4048
norm_0: 3.98e-04
a1_0: 7.72e-02
a2_0: 5.38e-01
r_0: 1.91e+02
c_0: 2.48e+00
Uncertainties from rough estimation:
norm_0: 2.973e-05
a1_0: 5.231e-02
a2_0: 5.086e-02
r_0: 4.137e-01
c_0: 1.881e-01
###Markdown
The uncertainties here are obtained from the Hessian matrix in the fit routine. To better estimate the uncertainties, we need to perform a Markov chain Monte Carlo (MCMC) analysis. Let's have a look at the best-fit profile first.
###Code
a3667_chandra_profile.calculate()
a3667_chandra_profile.plot(scale="loglog")
###Output
_____no_output_____
###Markdown
Now we use the Markov chain Monte Carlo (MCMC) method to estimate the uncertainties. It takes hours to finish.
###Code
a3667_chandra_profile.mcmc_error(nsteps=5000, burnin=500)
###Output
100%|โโโโโโโโโโ| 5000/5000 [4:14:59<00:00, 3.06s/it]
###Markdown
The corner plot
###Code
a3667_chandra_profile.plot(plot_type="contour")
###Output
_____no_output_____
###Markdown
Let's have a look at how the MCMC chains walk.
###Code
a3667_chandra_profile.plot(plot_type="mcmc_chain")
###Output
_____no_output_____
###Markdown
The uncertainties are stored in the `error` attribute.
###Code
print(a3667_chandra_profile.error)
a3667_chandra_profile.calculate()
###Output
/Users/xyzhang/anaconda3/envs/my/lib/python3.7/site-packages/sbfit-0.1.2-py3.7.egg/sbfit/profile.py:354: RuntimeWarning: invalid value encountered in true_divide
/Users/xyzhang/anaconda3/envs/my/lib/python3.7/site-packages/sbfit-0.1.2-py3.7.egg/sbfit/statistics.py:31: RuntimeWarning: invalid value encountered in true_divide
|
2. Installation of Numpy.ipynb | ###Markdown
Installing via pip. Most major projects upload official packages to the Python Package Index. They can be installed on most operating systems using Python's standard pip package manager. Note that you need to have Python and pip already installed on your system. You can install packages via commands such as:
###Code
python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose
###Output
_____no_output_____
###Markdown
Windows
###Code
pip install numpy
We recommend using a user install, using the --user flag to pip (note: do not use sudo pip, which can cause problems). This installs packages for your local user, and does not write to the system directories.
###Output
_____no_output_____
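After installing, a quick way to confirm that the package imports correctly (shown here for NumPy; the printed version depends on what pip installed):
```python
import numpy as np

print(np.__version__)
print(np.arange(5) * 2)  # [0 2 4 6 8]
```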
###Markdown
Install system-wide via a Linux package manager. Users on Linux can install packages from repositories provided by the distributions. These installations will be system-wide, and may have older package versions than those available using pip.
###Code
sudo apt-get install python-numpy python-scipy python-matplotlib ipython ipython-notebook python-pandas python-sympy python-nose
###Output
_____no_output_____
###Markdown
Fedora
###Code
Fedora 22 and later:
sudo dnf install numpy scipy python-matplotlib ipython python-pandas sympy python-nose atlas-devel
###Output
_____no_output_____ |
Big-Data-Clusters/CU2/Public/content/cert-management/cer043-install-controller-cert.ipynb | ###Markdown
CER043 - Install signed Controller certificate==============================================This notebook installs into the Big Data Cluster the certificate signedusing:- [CER033 - Sign Controller certificate with cluster Root CA](../cert-management/cer033-sign-controller-generated-cert.ipynb)NOTE: At the end of this notebook the Controller pod and all pods thatuse PolyBase (Master Pool and Compute Pool pods) will be restarted toload the new certificates.Steps----- Parameters
###Code
app_name = "controller"
scaledset_name = "control"
container_name = "controller"
prefix_keyfile_name = "controller"
common_name = "controller-svc"
test_cert_store_root = "/var/opt/secrets/test-certificates"
###Output
_____no_output_____
###Markdown
Common functionsDefine helper functions used in this notebook.
###Code
# Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows
import sys
import os
import re
import json
import platform
import shlex
import shutil
import datetime
from subprocess import Popen, PIPE
from IPython.display import Markdown
retry_hints = {} # Output in stderr known to be transient, therefore automatically retry
error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help
install_hint = {} # The SOP to help install the executable if it cannot be found
first_run = True
rules = None
debug_logging = False
def run(cmd, return_output=False, no_output=False, retry_count=0):
"""Run shell command, stream stdout, print stderr and optionally return output
NOTES:
1. Commands that need this kind of ' quoting on Windows e.g.:
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name}
Need to actually pass in as '"':
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name}
The ' quote approach, although correct when pasting into Windows cmd, will hang at the line:
`iter(p.stdout.readline, b'')`
The shlex.split call does the right thing for each platform, just use the '"' pattern for a '
"""
MAX_RETRIES = 5
output = ""
retry = False
global first_run
global rules
if first_run:
first_run = False
rules = load_rules()
# When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see:
#
# ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')
#
if platform.system() == "Windows" and cmd.startswith("azdata sql query"):
cmd = cmd.replace("\n", " ")
# shlex.split is required on bash and for Windows paths with spaces
#
cmd_actual = shlex.split(cmd)
# Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
#
user_provided_exe_name = cmd_actual[0].lower()
# When running python, use the python in the ADS sandbox ({sys.executable})
#
if cmd.startswith("python "):
cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)
# On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
# with:
#
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
#
# Setting it to a default value of "en_US.UTF-8" enables pip install to complete
#
if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
os.environ["LC_ALL"] = "en_US.UTF-8"
# When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc`
#
if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ:
cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc")
# To aid supportabilty, determine which binary file will actually be executed on the machine
#
which_binary = None
# Special case for CURL on Windows. The version of CURL in Windows System32 does not work to
# get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance
# of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost
# always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
# look for the 2nd installation of CURL in the path)
if platform.system() == "Windows" and cmd.startswith("curl "):
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, "curl.exe")
if os.path.exists(p) and os.access(p, os.X_OK):
if p.lower().find("system32") == -1:
cmd_actual[0] = p
which_binary = p
break
# Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
# seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound)
#
# NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
#
if which_binary == None:
which_binary = shutil.which(cmd_actual[0])
if which_binary == None:
if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
else:
cmd_actual[0] = which_binary
start_time = datetime.datetime.now().replace(microsecond=0)
print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
print(f" cwd: {os.getcwd()}")
# Command-line tools such as CURL and AZDATA HDFS commands output
# scrolling progress bars, which causes Jupyter to hang forever, to
# workaround this, use no_output=True
#
# Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait
#
wait = True
try:
if no_output:
p = Popen(cmd_actual)
else:
p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
with p.stdout:
for line in iter(p.stdout.readline, b''):
line = line.decode()
if return_output:
output = output + line
else:
if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
regex = re.compile(' "(.*)"\: "(.*)"')
match = regex.match(line)
if match:
if match.group(1).find("HTML") != -1:
display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
else:
display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))
wait = False
break # otherwise infinite hang, have not worked out why yet.
else:
print(line, end='')
if rules is not None:
apply_expert_rules(line)
if wait:
p.wait()
except FileNotFoundError as e:
if install_hint is not None:
display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e
exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()
if not no_output:
for line in iter(p.stderr.readline, b''):
try:
line_decoded = line.decode()
except UnicodeDecodeError:
# NOTE: Sometimes we get characters back that cannot be decoded(), e.g.
#
# \xa0
#
# For example see this in the response from `az group create`:
#
# ERROR: Get Token request returned http error: 400 and server
# response: {"error":"invalid_grant",# "error_description":"AADSTS700082:
# The refresh token has expired due to inactivity.\xa0The token was
# issued on 2018-10-25T23:35:11.9832872Z
#
# which generates the exception:
#
# UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte
#
print("WARNING: Unable to decode stderr line, printing raw bytes:")
print(line)
line_decoded = ""
pass
else:
# azdata emits a single empty line to stderr when doing an hdfs cp, don't
# print this empty "ERR:" as it confuses.
#
if line_decoded == "":
continue
print(f"STDERR: {line_decoded}", end='')
if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
exit_code_workaround = 1
# inject HINTs to next TSG/SOP based on output in stderr
#
if user_provided_exe_name in error_hints:
for error_hint in error_hints[user_provided_exe_name]:
if line_decoded.find(error_hint[0]) != -1:
display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))
# apply expert rules (to run follow-on notebooks), based on output
#
if rules is not None:
apply_expert_rules(line_decoded)
# Verify if a transient error, if so automatically retry (recursive)
#
if user_provided_exe_name in retry_hints:
for retry_hint in retry_hints[user_provided_exe_name]:
if line_decoded.find(retry_hint) != -1:
if retry_count < MAX_RETRIES:
print(f"RETRY: {retry_count} (due to: {retry_hint})")
retry_count = retry_count + 1
output = run(cmd, return_output=return_output, retry_count=retry_count)
if return_output:
return output
else:
return
elapsed = datetime.datetime.now().replace(microsecond=0) - start_time
# WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
# don't wait here, if success known above
#
if wait:
if p.returncode != 0:
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
else:
if exit_code_workaround !=0 :
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')
print(f'\nSUCCESS: {elapsed}s elapsed.\n')
if return_output:
return output
def load_json(filename):
"""Load a json file from disk and return the contents"""
with open(filename, encoding="utf8") as json_file:
return json.load(json_file)
def load_rules():
"""Load any 'expert rules' from the metadata of this notebook (.ipynb) that should be applied to the stderr of the running executable"""
try:
# Load this notebook as json to get access to the expert rules in the notebook metadata.
#
j = load_json("cer043-install-controller-cert.ipynb")
except:
pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename?
else:
if "metadata" in j and \
"azdata" in j["metadata"] and \
"expert" in j["metadata"]["azdata"] and \
"rules" in j["metadata"]["azdata"]["expert"]:
rules = j["metadata"]["azdata"]["expert"]["rules"]
rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first.
# print (f"EXPERT: There are {len(rules)} rules to evaluate.")
return rules
def apply_expert_rules(line):
"""Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so
inject a 'HINT' to the follow-on SOP/TSG to run"""
global rules
for rule in rules:
# rules that have 9 elements are the injected (output) rules (the ones we want). Rules
# with only 8 elements are the source (input) rules, which are not expanded (i.e. TSG029,
# not ../repair/tsg029-nb-name.ipynb)
if len(rule) == 9:
notebook = rule[1]
cell_type = rule[2]
output_type = rule[3] # i.e. stream or error
output_type_name = rule[4] # i.e. ename or name
output_type_value = rule[5] # i.e. SystemExit or stdout
details_name = rule[6] # i.e. evalue or text
expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it!
if debug_logging:
print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.")
if re.match(expression, line, re.DOTALL):
if debug_logging:
print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook))
match_found = True
display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.'))
print('Common functions defined successfully.')
# Hints for binary (transient fault) retry, (known) error and install guide
#
retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond']}
error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['no such host', 'TSG011 - Restart sparkhistory server', '../repair/tsg011-restart-sparkhistory-server.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']]}
install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb']}
###Output
_____no_output_____
###Markdown
Get the Kubernetes namespace for the big data clusterGet the namespace of the Big Data Cluster use the kubectl command lineinterface .**NOTE:**If there is more than one Big Data Cluster in the target Kubernetescluster, then either:- set \[0\] to the correct value for the big data cluster.- set the environment variable AZDATA\_NAMESPACE, before starting Azure Data Studio.
###Code
# Place Kubernetes namespace name for BDC into 'namespace' variable
if "AZDATA_NAMESPACE" in os.environ:
namespace = os.environ["AZDATA_NAMESPACE"]
else:
try:
namespace = run(f'kubectl get namespace --selector=MSSQL_CLUSTER -o jsonpath={{.items[0].metadata.name}}', return_output=True)
except:
from IPython.display import Markdown
print(f"ERROR: Unable to find a Kubernetes namespace with label 'MSSQL_CLUSTER'. SQL Server Big Data Cluster Kubernetes namespaces contain the label 'MSSQL_CLUSTER'.")
display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
raise
print(f'The SQL Server Big Data Cluster Kubernetes namespace is: {namespace}')
###Output
_____no_output_____
###Markdown
Create a temporary directory to stage files
###Code
# Create a temporary directory to hold configuration files
import tempfile
temp_dir = tempfile.mkdtemp()
print(f"Temporary directory created: {temp_dir}")
###Output
_____no_output_____
###Markdown
Helper function to save configuration files to disk
###Code
# Define helper function 'save_file' to save configuration files to the temporary directory created above
import os
import io
def save_file(filename, contents):
with io.open(os.path.join(temp_dir, filename), "w", encoding='utf8', newline='\n') as text_file:
text_file.write(contents)
print("File saved: " + os.path.join(temp_dir, filename))
print("Function `save_file` defined successfully.")
###Output
_____no_output_____
###Markdown
Get name of the โRunningโ `controller` `pod`
###Code
# Place the name of the 'Running' controller pod in variable `controller`
controller = run(f'kubectl get pod --selector=app=controller -n {namespace} -o jsonpath={{.items[0].metadata.name}} --field-selector=status.phase=Running', return_output=True)
print(f"Controller pod name: {controller}")
###Output
_____no_output_____
###Markdown
Copy certifcate files from `controller` to local machine
###Code
import os
cwd = os.getcwd()
os.chdir(temp_dir) # Workaround kubectl bug on Windows, can't put c:\ on kubectl cp cmd line
run(f'kubectl cp {controller}:{test_cert_store_root}/{app_name}/{prefix_keyfile_name}-certificate.p12 {prefix_keyfile_name}-certificate.p12 -c controller -n {namespace}')
run(f'kubectl cp {controller}:{test_cert_store_root}/{app_name}/{prefix_keyfile_name}-certificate.pem {prefix_keyfile_name}-certificate.pem -c controller -n {namespace}')
run(f'kubectl cp {controller}:{test_cert_store_root}/{app_name}/{prefix_keyfile_name}-privatekey.pem {prefix_keyfile_name}-privatekey.pem -c controller -n {namespace}')
os.chdir(cwd)
###Output
_____no_output_____
###Markdown
Copy certifcate files from local machine to `controldb`
###Code
import os
cwd = os.getcwd()
os.chdir(temp_dir) # Workaround kubectl bug on Windows, can't put c:\ on kubectl cp cmd line
run(f'kubectl cp {prefix_keyfile_name}-certificate.p12 controldb-0:/var/opt/mssql/{prefix_keyfile_name}-certificate.p12 -c mssql-server -n {namespace}')
run(f'kubectl cp {prefix_keyfile_name}-certificate.pem controldb-0:/var/opt/mssql/{prefix_keyfile_name}-certificate.pem -c mssql-server -n {namespace}')
run(f'kubectl cp {prefix_keyfile_name}-privatekey.pem controldb-0:/var/opt/mssql/{prefix_keyfile_name}-privatekey.pem -c mssql-server -n {namespace}')
os.chdir(cwd)
###Output
_____no_output_____
###Markdown
Get the `controller-db-rw-secret` secretGet the controller SQL symmetric key password for decryption.
###Code
import base64
controller_db_rw_secret = run(f'kubectl get secret/controller-db-rw-secret -n {namespace} -o jsonpath={{.data.encryptionPassword}}', return_output=True)
controller_db_rw_secret = base64.b64decode(controller_db_rw_secret).decode('utf-8')
print("controller_db_rw_secret retrieved")
###Output
_____no_output_____
###Markdown
Update the files table with the certificates through opened SQL connection
###Code
import os
sql = f"""
OPEN SYMMETRIC KEY ControllerDbSymmetricKey DECRYPTION BY PASSWORD = '{controller_db_rw_secret}'
DECLARE @FileData VARBINARY(MAX), @Key uniqueidentifier;
SELECT @Key = KEY_GUID('ControllerDbSymmetricKey');
SELECT TOP 1 @FileData = doc.BulkColumn FROM OPENROWSET(BULK N'/var/opt/mssql/{prefix_keyfile_name}-certificate.p12', SINGLE_BLOB) AS doc;
EXEC [dbo].[sp_set_file_data_encrypted] @FilePath = '/config/scaledsets/control/containers/{container_name}/files/{prefix_keyfile_name}-certificate.p12',
@Data = @FileData,
@KeyGuid = @Key,
@Version = '0';
SELECT TOP 1 @FileData = doc.BulkColumn FROM OPENROWSET(BULK N'/var/opt/mssql/{prefix_keyfile_name}-certificate.pem', SINGLE_BLOB) AS doc;
EXEC [dbo].[sp_set_file_data_encrypted] @FilePath = '/config/scaledsets/control/containers/{container_name}/files/{prefix_keyfile_name}-certificate.pem',
@Data = @FileData,
@KeyGuid = @Key,
@Version = '0';
SELECT TOP 1 @FileData = doc.BulkColumn FROM OPENROWSET(BULK N'/var/opt/mssql/{prefix_keyfile_name}-privatekey.pem', SINGLE_BLOB) AS doc;
EXEC [dbo].[sp_set_file_data_encrypted] @FilePath = '/config/scaledsets/control/containers/{container_name}/files/{prefix_keyfile_name}-privatekey.pem',
@Data = @FileData,
@KeyGuid = @Key,
@Version = '0';
"""
save_file("insert_certificates.sql", sql)
cwd = os.getcwd()
os.chdir(temp_dir) # Workaround kubectl bug on Windows, can't put c:\ on kubectl cp cmd line
run(f'kubectl cp insert_certificates.sql controldb-0:/var/opt/mssql/insert_certificates.sql -c mssql-server -n {namespace}')
run(f"""kubectl exec controldb-0 -c mssql-server -n {namespace} -- bash -c "SQLCMDPASSWORD=`cat /var/run/secrets/credentials/mssql-sa-password/password` /opt/mssql-tools/bin/sqlcmd -b -U sa -d controller -i /var/opt/mssql/insert_certificates.sql" """)
# Cleanup
run(f"""kubectl exec controldb-0 -c mssql-server -n {namespace} -- bash -c "rm /var/opt/mssql/insert_certificates.sql" """)
run(f"""kubectl exec controldb-0 -c mssql-server -n {namespace} -- bash -c "rm /var/opt/mssql/{prefix_keyfile_name}-certificate.p12" """)
run(f"""kubectl exec controldb-0 -c mssql-server -n {namespace} -- bash -c "rm /var/opt/mssql/{prefix_keyfile_name}-certificate.pem" """)
run(f"""kubectl exec controldb-0 -c mssql-server -n {namespace} -- bash -c "rm /var/opt/mssql/{prefix_keyfile_name}-privatekey.pem" """)
os.chdir(cwd)
###Output
_____no_output_____
###Markdown
Clean up certificate staging areaRemove the certificate files generated on disk (they have now beenplaced in the controller database).
###Code
cmd = f"rm -r {test_cert_store_root}/{app_name}"
run(f'kubectl exec {controller} -c controller -n {namespace} -- bash -c "{cmd}"')
###Output
_____no_output_____
###Markdown
Clear out the controller\_db\_rw\_secret variable
###Code
controller_db_rw_secret= ""
###Output
_____no_output_____
###Markdown
Restart `controller` to pick up new certificates.Delete the controller pod so that it can restart the controller and pickup new certificates.
###Code
run(f'kubectl delete pod {controller} -n {namespace}')
###Output
_____no_output_____
###Markdown
Restart `master pool` pods to pick up new certificates.All pods that use PolyBase need to be restarted to load the newcertificates.
###Code
pods = run(f'kubectl get pods -n {namespace} --selector role=master-pool --output=jsonpath={{.items[*].metadata.name}}', return_output=True)
for pod in pods.split(' '):
run(f'kubectl delete pod {pod} -n {namespace}')
###Output
_____no_output_____
###Markdown
Restart `compute pool` pods to pick up new certificates.All pods that use PolyBase need to be restarted to load the newcertificates.
###Code
pods = run(f'kubectl get pods -n {namespace} --selector role=compute-pool --output=jsonpath={{.items[*].metadata.name}}', return_output=True)
for pod in pods.split(' '):
run(f'kubectl delete pod {pod} -n {namespace}')
###Output
_____no_output_____
###Markdown
Clean up temporary directory for staging configuration files
###Code
# Delete the temporary directory used to hold configuration files
import shutil
shutil.rmtree(temp_dir)
print(f'Temporary directory deleted: {temp_dir}')
print('Notebook execution complete.')
###Output
_____no_output_____ |
password_condtion.ipynb | ###Markdown
###Code
import re
a=input("enter the strong password")
count=0
while True:
if (len(a)<8):
count=-1
break
elif not re.search("[A-Z]",a):
count=-1
break
elif not re.search("[0-9]",a):
count=-1
break
elif not re.search("[_@$]",a):
count=-1
break
elif not re.search("[a-z]",a):
count=-1
break
elif re.search("\s",a):
count=-1
break
else:
count=0
print("valid password")
break
if count is not 0:
print("not a valid password")
a=input("enter the password")
b,c,d,e=0,0,0,0
if (len(a)>=8):
for i in a:
if (i.islower()):
b+=1
elif (i.isupper()):
c+=1
elif (i.isdigit()):
d+=1
elif (i=='@' or i=='_' or i=='$' or i=='%'):
e+=1
if (b>=1 and c>=1 and d>=1 and e>=1 and b+c+d+e==len(a)):
print("valid pass word\n")
else:
print("not a valid password\n")
print("try adding the conditions\n\n" +
"1.at least 8 characters\n"+
"2.one uppercase\n"+
"3.one lower case\n" +
"4.one special character\n")
###Output
enter the passwordM
not a valid password
try adding the conditions
1.at least 8 characters
2.one uppercase
3.one lower case
4.one special character
|
ANN/DNN1.ipynb | ###Markdown
Deep Learning Version, oversampled
###Code
#imports
import time
start_time = time.time()
import numpy as np
from matplotlib import pyplot as plt
from keras import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.metrics import binary_accuracy
#from keras.utils import np_utils
print("--- %s seconds ---" % (time.time() - start_time))
# import datasets with time taken!
#smoll
""" # commented out to save computation
start_time = time.time()
smoll = np.loadtxt("/home/willett/NeutrinoData/small_CNN_input_processed.txt", comments='#')
print("--- %s seconds ---" % (time.time() - start_time))
print(smoll.shape)
"""
#and the full
start_time = time.time()
fll = np.loadtxt("/home/willett/NeutrinoData/full_CNN_input_processed.txt", comments='#')
print("--- %s seconds ---" % (time.time() - start_time))
print(fll.shape)
""" # commented out to save computation
#and the full
start_time = time.time()
fll = np.loadtxt("/home/willett/NeutrinoData/test_CNN_input_processed.txt", comments='#')
print("--- %s seconds ---" % (time.time() - start_time))
print(fll.shape)
"""
# extract title
pls = open("/home/willett/NeutrinoData/small_CNN_input_processed.txt", "r")
title = pls.readline()
title = title[2:-1]
print(title)
# creating a dataset switch, change what UsedData is to change CNN
UD = fll # Used Data = <dataset>
UDLength = UD.shape[0]
print("shape: ",UD.shape,"\nsize: ", UD.size," \nlength: ", UDLength)
# dataset is expected in this format:
# FirstLayer LastLayer NHits AverageZP Thrust PID_Angle PID_Front PID_LLR_M
#FirstLayer LastLayer NHits_Low AverageZP Thrust_Lo PID_Angle PID_Front PID_LLR_M
#Energy_As Angle_Bet Distance_Bet Sig Bg
# with Sig and Bg expected as one hot vectors.
# splitting X = dataset , Y = one hot vectors
X = UD[:,0:-2]
Y = UD[:,-2:]   # the last two columns are the one-hot Sig/Bg labels
print("X shape: ",X.shape,"\nY shape: ", Y.shape)
# they will be split into testing and training at compile
# inevitable bias removal... by oversampling
# using a 50% oversampling ratio, because I want to! (no citation)
#how long?
start_time = time.time()
SigI = np.where(Y[:,0] == 1)[0]
BgI= np.where(Y[:,0] == 0)[0]
SigN = SigI.size # how much signal there is
BgN = BgI.size # how much background there is
Multip = int(BgN/SigN) # how much more signal event copies needed for ~50%
print(" signal and background event number: ",SigI.size,BgI.size,"\n number more needed:",Multip)
SNratio = (100*SigN)/(SigN + BgN)
print("initial Signal to Noise ratio: ",SNratio,"% signal")
#im going to reconstruct the arrays of signal events, background events, then add them together and shuffle!
XSig = X[SigI]
XBg = X[BgI]
YSig = Y[SigI]
YBg = Y[BgI]
#print(XSig.shape,XBg.shape, YSig.shape, YBg.shape) # these are the events of each type.
# this is the array of signal repreated (tiled) multip times.
YSigM = np.transpose(np.tile(np.transpose(YSig), Multip))
XSigM = np.transpose(np.tile(np.transpose(XSig), Multip))
print( XSigM.shape, YSigM.shape)
#adding arrays together and then shuffling:
X2 = np.concatenate((XBg,XSigM))
Y2 = np.concatenate((YBg,YSigM))
print(X2.shape, Y2.shape)
#shuffling
# Shuffle X2 and Y2 with the same permutation so each sample keeps its label
# (shuffling them independently would break the feature/label correspondence).
print("first rows before and after shuffling, to check that the shuffle happened:")
print(X2[0:3,0],Y2[0:3,0])
perm = np.random.permutation(X2.shape[0])
X2 = X2[perm]
Y2 = Y2[perm]
print(X2[0:3,0],Y2[0:3,0])
#final ratio:
NewSigN = YSigM.shape[0]
SNRatioNew = (100*YSigM.shape[0]) / (YSigM.shape[0] + BgN)
print("final Signal Noise ratio: ",SNRatioNew,"% signal")
print("--- %s seconds ---" % (time.time() - start_time))
print(X2.shape)
X3 = np.expand_dims(X2, axis=2)
print(X3.shape)
#neural network architecture:
model = Sequential()
# set variables:
width = 30 #--number on nodes in the layer
DR = 0.5 #--fraction of nodes dropped during training
AT = "sigmoid" #--activation type for dense layers
UB = True #--use bias vectors
InDim = (X3.shape[1],X3.shape[2] ) #--input shape of single sample (tuple)
#construction:
start_time = time.time() # how long does it take?
model.add(Dense(width,activation=AT, use_bias=UB, input_shape=(19,1) )) # input layer and 1
model.add(Dropout(DR))
model.add(Dense(width,activation=AT, use_bias=UB )) # 2
model.add(Dropout(DR))
model.add(Dense(width,activation=AT, use_bias=UB )) # 3
model.add(Dropout(DR))
model.add(Dense(width,activation=AT, use_bias=UB )) # 4
model.add(Dropout(DR))
#Because this one is deep:
model.add(Dense(width,activation=AT, use_bias=UB )) # 3
model.add(Dropout(DR))
model.add(Dense(width,activation=AT, use_bias=UB )) # 4
model.add(Dropout(DR))
model.add(Dense(width,activation=AT, use_bias=UB )) # 3
model.add(Dropout(DR))
model.add(Dense(width,activation=AT, use_bias=UB )) # 4
model.add(Dropout(DR))
model.add(Flatten()) # reduce dimensionality of the input data for output
model.add(Dense(2, activation="softmax", use_bias=UB)) # output layer softmax recommended
#(classification mutually excluive + softmax differentiable for optimizing)
# -> https://www.quora.com/Artificial-Neural-Networks-Why-do-we-use-softmax-function-for-output-layer
print("--- %s seconds ---" % (time.time() - start_time))
# Apply regularizer if overfitting! ^
# binary_crossentropy is the best according to https://www.dlology.com/blog/how-to-choose-last-layer-activation-and-loss-function/
# adam is best for me according to https://towardsdatascience.com/types-of-optimization-algorithms-used-in-neural-networks-and-ways-to-optimize-gradient-95ae5d39529f
# compile model:
start_time = time.time() # how long does it take?
model.compile(optimizer='adagrad',
loss='binary_crossentropy',
metrics=['accuracy', 'binary_accuracy' ])
print("--- %s seconds ---" % (time.time() - start_time))
# Train the model, iterating on the data in batches of 32 samples
start_time = time.time() # how long does it take?
history = model.fit(X3, # the (now oversampled) dataset
Y2, #true or false values for the dataset
epochs=100, #number of iteration over data
batch_size=32, #number of trainings between tests
verbose=1, #prints one line per epoch of progress bar
validation_split=0.1 ) #ratio of test to train
print("--- %s seconds ---" % (time.time() - start_time))
#summarise history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
###Output
_____no_output_____ |
2_Curso/Laboratorio/SAGE-noteb/IPYNB/PROBA/118-PROBA-Paseo-2dim.ipynb | ###Markdown
In this exercise we study random walks in two dimensions. The walker starts at the origin $(0,0)$ of $\mathbb{R}^2$ and moves along the points with integer coordinates. At each moment of time ($t=0,1,2,3,\dots$) it randomly chooses one of its four neighbouring points (with integer coordinates) and moves to it. The problem we want to study is determining the probability that the walker eventually returns to the origin. The difficulty we run into is that the time it takes to return can be astronomically large, and using a computer we are limited. To handle this problem we must fix a maximum waiting time $T$ ($10^5$ in the example below), and we will let ourselves be convinced that the return probability is $1$ if we see that it increases as we let $T$ grow and appears to approach $1$. Study the $3$-dimensional case of this same problem. Study the variant in which we consider two random walkers that both start at the origin $(0,0)$ at $t=0$, and we want to study the probability that they meet again (that they are at the same place at the same moment of time).
###Code
def actualizar(L):
x = randint(1,4)
if x == 1:
L[0] = L[0]+1
return L
elif x == 2:
L[1] = L[1]+1
return L
elif x == 3:
L[0] = L[0]-1
return L
else:
L[1] = L[1]-1
return L
def retorno():
cont = 0
Pini = actualizar([0,0])
while (Pini != [0,0] and cont<= 10^5):
Pini = actualizar(Pini)
cont += 1
#if cont%10000 == 0:
#print cont,Pini
return cont
def probabilidad(N):
contador = 0
for muda in xrange(N):
cont = retorno()
##print cont
if cont != 10^5+1:
contador += 1
if muda%100 == 0:
print muda
return (contador/N).n()
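# Sketch for the 3-dimensional case asked for above: the same scheme with six
# possible moves. Plug these into probabilidad() in place of retorno() to
# estimate the 3D return probability.
def actualizar3d(L):
    x = randint(1,6)
    eje = (x-1) % 3              # which axis to move along: 0, 1 or 2
    paso = 1 if x <= 3 else -1   # direction of the step
    L[eje] = L[eje] + paso
    return L

def retorno3d():
    cont = 0
    P = actualizar3d([0,0,0])
    while (P != [0,0,0] and cont <= 10^5):
        P = actualizar3d(P)
        cont += 1
    return cont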
probabilidad(10^3)
###Output
0
100
200
300
400
500
600
700
800
900
|
nobel_physics_prizes/notebooks/4.2-topic-modeling.ipynb | ###Markdown
Topic ModelingIf you recall, the goals of the unsuccessful [exploratory factor analysis](4.1-exploratory-factor-analysis.ipynb) were to:1. **Reduce the dimensionality of the feature space** to help prevent overfitting when building models.2. **Find a representation of the observed variables in a lower dimensional latent space**. Reducing the variables to **latent factors** helps with interpretability of models.The aim of this notebook is to achieve these goals through a **topic modeling approach**. A [topic model](https://en.wikipedia.org/wiki/Topic_model) is an unsupervised method in [natural language processing](https://en.wikipedia.org/wiki/Natural_language_processing) for discovering **latent topics** in a *corpus* of documents. A topic is essentially a collection of words that statistically co-occur frequently together in documents. So in the topic modeling framework, a document consists of topics and topics are composed of words. It is important to understand that topic modeling is not only restricted to words and can be used for any discrete data. In our case, the discrete data (words) are the binary features and the corpus of documents are the physicists. We will use topic modeling to discover **latent topics**, analogous to the **latent factors** in factor analysis, that underlie the physicists data. The number of topics is specified *a priori* and is expected to correspond to the intrinsic dimensionality of the data. As such it is expected to be much lower than the dimensionality of feature data.[Correlation Explanation](https://www.transacl.org/ojs/index.php/tacl/article/view/1244/275) (**CorEx**) is a discriminative and information-theoretic approach to learning latent topics over documents. It is different from most topic models as it does not assume an underlying generative model for the data. It instead learns maximally informative topics through an information-theoretic framework. The CorEx topic model seeks to maximally explain the dependencies of words in documents through latent topics. CorEx does this by maximizing a lower bound on the [total correlation](https://en.wikipedia.org/wiki/Total_correlation) (multivariate [mutual information](https://en.wikipedia.org/wiki/Mutual_information)) of the words and topics.There are many advantages of the CorEx model that make it particularly attractive. The most relevant ones for this study are:- **No generative model is assumed for the data**, which means means no validation of assumptions that may or may not be true. The latent topics are learnt entirely from the data. This makes the model extremely flexible and powerful.- The method can be used for any **sparse binary dataset** and its algorithm naturally and efficiently takes advantage of the sparsity in the data.- Binary latent topics are learnt, which leads to **highly interpretable models**. A document can consist of no topics, all topics, or any number of topics in between.- **No tuning of numerous hyperparameters**. There is only one hyperparameter, the *number of topics*, and there is a principled way to choose this.More details on the mathematical and implementation details of the CorEx model can be found in section 2 of [Anchored Correlation Explanation: Topic Modeling with Minimal Domain Knowledge](https://www.transacl.org/ojs/index.php/tacl/article/view/1244/275) by Gallagher et al. We will be using the python implementation [corextopic](https://github.com/gregversteeg/corex_topic) for the topic modeling.
###Code
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.sparse as ss
import seaborn as sns
from corextopic import corextopic as ct
from src.features.features_utils import convert_categoricals_to_numerical
from src.data.progress_bar import progress_bar
%matplotlib inline
###Output
_____no_output_____
###Markdown
Reading in the DataFirst let's read in the training, validation and test features and convert the categorical fields to a numerical form that is suitable for building machine learning models.
###Code
train_features = pd.read_csv('../data/processed/train-features.csv')
train_features = convert_categoricals_to_numerical(train_features)
train_features.head()
validation_features = pd.read_csv('../data/processed/validation-features.csv')
validation_features = convert_categoricals_to_numerical(validation_features)
validation_features.head()
test_features = pd.read_csv('../data/processed/test-features.csv')
test_features = convert_categoricals_to_numerical(test_features)
test_features.head()
###Output
_____no_output_____
###Markdown
Model SelectionThere is a principled way for choosing the *number of topics*. Gallagher et al. state that "Since each topic explains a certain portion of the overall total correlation, we may choose the number of topics by observing diminishing returns to the objective. Furthermore, since the CorEx implementation depends on a random initialization (as described shortly), one may restart the CorEx topic model several times and choose the one that explains the most total correlation." Following this suggestion, we have written a function that fits a CorEx topic model over a *number of topics range*. For each *number of topics*, the function fits a specified *number of topic models* and selects the topic model with the highest total correlation (TC). Finally, the topic model with the *number of topics* corresponding to the overall highest TC is chosen (i.e. the model that produces topics that are most informative about the documents). This function takes a few minutes to run as it is doing an exhaustive search over a wide range of the number of topics, so feel free grab a coffee.
###Code
def find_best_topic_model(features, num_topic_models=10, num_topics_range=range(1, 11), max_iter=200,
eps=1e-05, progress_bar=None):
"""Find the best topic model as measured by total correlation (TC).
Fits a CorEx topic model over a number of topics range. For each number of topics,
fits a specified number of topic models and selects the topic model with the
highest total correlation (TC), ignoring topic models with empty topics. Finally,
the topic model with the value of number of topics corresponding to the overall
highest TC is chosen (namely, the model that produces topics that are most
informative about the documents).
Args:
features (pandas.DataFrame): Binary features dataframe.
num_topic_models (int, optional): Defaults to 10. Number of topics models to
fit for each number of topics.
num_topics_range (range, optional): Defaults to range(1, 11). Range of number
of topics to fit models over.
max_iter (int, optional): Defaults to 200. Maximum number of iterations
before ending.
eps (float, optional): Defaults to 1e-05. Convergence tolerance.
progress_bar (progressbar.ProgressBar, optional): Defaults to None.
Progress bar.
Returns:
corextopic.CorEx: CorEx topic model.
CorEx topic model with the highest total correlation.
"""
if progress_bar:
progress_bar.start()
X = ss.csr_matrix(features.values)
high_tc_topic_models = []
for n_topic in num_topics_range:
if progress_bar:
progress_bar.update(n_topic)
topic_models = []
for n_topic_models in range(1, num_topic_models + 1):
topic_model = ct.Corex(n_hidden=n_topic, max_iter=max_iter, eps=eps, seed=n_topic_models)
topic_model.fit(X, words=features.columns, docs=features.index)
if _has_empty_topics(topic_model): # unstable model so ignore
continue
topic_models.append((topic_model, topic_model.tc))
if not topic_models:
continue
# for given number of topics, find model with highest total correlation (TC)
topic_models.sort(key=lambda x:x[1], reverse=True)
high_tc_topic_models.append((topic_models[0][0], topic_models[0][1]))
# find overall model with highest total correlation (TC)
high_tc_topic_models.sort(key=lambda x:x[1], reverse=True)
high_tc_model = high_tc_topic_models[0][0]
if progress_bar:
progress_bar.finish()
return high_tc_model
def _has_empty_topics(model):
for n_topic in range(model.n_hidden - 1, 0, -1):
if not model.get_topics(topic=n_topic):
return True
return False
num_topics_range=range(1, 31)
topic_model = find_best_topic_model(
train_features, num_topic_models=20, num_topics_range=num_topics_range,
progress_bar=progress_bar(len(num_topics_range), banner_text_begin='Running: ',
banner_text_end=' topics range'))
print('Number of latent factors (topics) = ', topic_model.n_hidden)
print('Total correlation = ', round(topic_model.tc, 2))
###Output
_____no_output_____
###Markdown
So the optimal number of topics is 25. Note that we have tuned the `num_topic_models` so that this number is stable. If for instance the `num_topic_models` is reduced to 10, then the value of the optimal number of topics will change due to the random initializations of the CorEx topic model. Let's now observe the distribution of TCs for each topic to see how much each additional topic contributes to the overall TC. We should keep adding topics until additional topics do not significantly contribute to the overall TC.
###Code
def plot_topics_total_correlation_distribution(
topic_model, ylim=(0, 2.5), title='Topics total correlation distribution',
xlabel='Topic number'):
"""Plot the total correlation distribution of a CorEx topic model.
Args:
topic_model (corextopic.CorEx): CorEx topic model.
ylim (tuple of (`int`, `int`), optional): Defaults to (0, 2.5).
y limits of the axes.
title (str, optional): Defaults to 'Topics total correlation distribution'.
Title for axes.
xlabel (str, optional):. Defaults to 'Topic number'. x-axis label.
"""
plt.bar(range(0, topic_model.tcs.shape[0]), topic_model.tcs)
plt.xticks(range(topic_model.n_hidden))
plt.ylim(ylim)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel('Total correlation (nats)')
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.tick_params(bottom=False, left=False)
plot_topics_total_correlation_distribution(topic_model)
###Output
_____no_output_____
###Markdown
Looking at the plot, you can see that this statement is fairly subjective. Should we take 10, 12, 15, 18 or 22 topics? A slightly more principled way would be to look at the cumulative distribution and select the minimum number of topics that explains say 95% of the overall topics total correlation. This is similar to an explained variance cut-off value in principal component analysis. The plot is shown below.
###Code
def plot_topics_total_correlation_cumulative_distribution(
topic_model, ylim=(0, 17), cutoff=None, title='Topics total correlation cumulative distribution',
xlabel='Topic number'):
"""Plot the total correlation cumulative distribution of a CorEx topic model.
Args:
topic_model (corextopic.CorEx): CorEx topic model.
ylim (tuple of (`int`, `int`), optional): Defaults to (0, 2.5).
y limits of the axes.
cutoff (float, optional). Defaults to None. `If float, then 0 < cutoff < 1.0.
The fraction of the cumulative total correlation to use as a cutoff. A
horizontal dashed line will be drawn to indicate this value.
title (str, optional): Defaults to 'Topics total correlation cumulative distribution'.
Title for axes.
xlabel (str, optional): Defaults to 'Topic number'. x-axis label.
"""
plt.bar(range(0, topic_model.tcs.shape[0]), np.cumsum(topic_model.tcs))
if cutoff:
plt.axhline(cutoff * topic_model.tc, linestyle='--', color='r')
plt.xticks(range(topic_model.n_hidden))
plt.ylim(ylim)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel('Total correlation (nats)')
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.tick_params(bottom=False, left=False)
plot_topics_total_correlation_cumulative_distribution(topic_model, cutoff=0.95)
###Output
_____no_output_____
###Markdown
Using this criteria suggests that 18 topics would be appropriate. However, again this is fairly subjective. Should we choose a cut-off of 90%, 95% or 99%? All of these different values would change the conclusion of the number of topics to retain. As there are so few topics anyway, it makes more sense to retain all 25 topics and not lose any further information. You will also see shortly that there is some interesting information in the tail of the topics. TopicsNow we will take a look at the produced topics, in descending order of the total correlation they explain, to see how coherent they are. The features in topics are ranked in descending order of their [mutual information](https://en.wikipedia.org/wiki/Mutual_information) with the topic. So features with higher values of mutual information are more associated with the topic than features with lower values. Do not be alarmed by the negative values of mutual information. As Gallagher explains in the [notebook example](https://github.com/gregversteeg/corex_topic/blob/master/corextopic/example/corex_topic_example.ipynb), "Theoretically, mutual information is always positive. If the CorEx output returns a negative mutual information from `get_topics()`, then the absolute value of that quantity is the mutual information between the topic and the absence of that word." We add labels to the topics to aid with their interpretability.
###Code
latent_factors = {'is_eu_worker':'European Workers',
'is_eu_alumni':'European Alumni',
'is_alumni':'Alumni',
'is_na_eu_resident':'North American and European Residents',
'is_na_citizen':'North American Citizens',
'is_na_worker':'North American Workers',
'is_as_citizen':'Asian Citizens',
'is_na_alumni':'North American Alumni',
'is_gbr_citizen':'British Citizens',
'is_rus_citizen':'Russian Citizens',
'is_deu_citizen':'German Citizens',
'is_nld_ita_che_citizen':'Netherlands, Italian and Swiss Citizens',
'is_studyholic':'Studyholics',
'is_workhorse':'Workhorses',
'is_aut_citizen':'Austrian Citizens',
'is_eu_citizen':'European Citizens',
'is_gbr_worker':'British Workers',
'is_passport_collector':'Passport Collectors',
'is_born':'Born',
'is_fra_citizen':'French Citizens',
'is_other_citizen':'Other Citizens',
'is_emigrant':'Emigrants',
'is_physics_laureate_teacher':'Physics Laureate Teachers',
'is_physics_laureate_student':'Physics Laureate Students',
'is_astronomer':'Astronomers'
}
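# Quick text summary of the topics (sketch), using the same get_topics() call as
# the plotting function below: top features per topic by mutual information.
for n in range(topic_model.n_hidden):
    top_feats = [t[0] for t in topic_model.get_topics(n_words=5, topic=n)]
    print(n, list(latent_factors.values())[n], ':', ', '.join(top_feats))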
def plot_topics(topic_model, topic_labels=None, max_features_per_topic=15, xlim=(-0.5, 1),
ylabel='Feature', figsize=None, plotting_context='notebook'):
"""Plot the topics of a CorEx topic model.
Args:
topic_model (corextopic.CorEx): CorEx topic model.
topic_labels (list of `str`, optional): Defaults to None. Topic labels for each
axis.
max_features_per_topic (int, optional): Maximum number of features to plot
per topic.
xlim (tuple of (`int`, `int`), optional): Defaults to (-0.5, 1).
x limits of the axes.
ylabel (str, optional): Defaults to 'Feature'. y-axis label.
figsize (tuple of (`int`, `int`), optional): Defaults to None. Figure size in
inches x inches.
plotting_context (str, optional): Defaults to `notebook`. Seaborn plotting
context.
"""
with sns.plotting_context(plotting_context):
fig, ax = plt.subplots(nrows=topic_model.n_hidden, ncols=1, sharex=False, figsize=figsize)
plt.subplots_adjust(hspace=200)
for n_topic in range(topic_model.n_hidden):
topic = topic_model.get_topics(n_words=max_features_per_topic, topic=n_topic)
labels = [label[0] for label in topic]
mutual_info = [mi[1] for mi in topic]
ax[n_topic].barh(labels, mutual_info)
ax[n_topic].set_xlim(xlim)
ax[n_topic].set_ylim(-0.5, max_features_per_topic - 0.5)
if topic_labels:
title = topic_labels[n_topic]
else:
title = 'topic_' + str(n_topic)
ax[n_topic].set(title=title, xlabel='Mutual information (nats)',
ylabel=ylabel)
fig.tight_layout()
plot_topics(topic_model, topic_labels=list(latent_factors.values()), figsize=(20, 280),
plotting_context='talk')
###Output
_____no_output_____
###Markdown
As you can see, the topic labels are self-explanatory and correspond mainly with the dominant features of each topic, as measured by the mutual information. As explained before, the features with very low mutual information are not really informative about the topic. The fact we could put a name to every topic shows just how discriminative the topic modeling is. It's impressive how coherent some of the topics are. The *North American Workers*, *North American Alumni*, *Workhorses*, *Studyholics* and *French Citizens* topics are exemplary examples of such topics. The *Born* topic is definitely the least coherent topic and maybe suggests that the features in this topic were probably not so useful to begin with. Top Documents Per Topic. As with the topic features, the most probable documents (physicists) per topic can also be easily accessed, and it is interesting to take a look at a few of these. As Gallagher says, they "are sorted according to log probabilities which is why the highest probability documents have a score of 0 ($e^0 = 1$) and other documents have negative scores (for example, $e^{-0.5} \approx 0.6$)."OK let's take a look at the top physicists in the *European Workers* (topic 0), *Workhorses* (topic 13) and *Physics Laureate Teachers* (topic 22).
###Code
topic_model.get_top_docs(n_docs=30, topic=0, sort_by='log_prob')
###Output
_____no_output_____
###Markdown
The names here seem reasonable as physicists who have worked in Europe. But as you can see from the probabilities, a lot of the physicists have a similar mutual information with this topic. It's a different story if we use the TC instead. This is more discriminative, but from the warning message you can see that Gallagher does not yet recommend this.
###Code
topic_model.get_top_docs(n_docs=30, topic=0, sort_by='tc')
###Output
_____no_output_____
###Markdown
Below we see the real workhorses of physics. The probabilities here seem to discriminate the physicists a lot better. If you examine the Wikipedia Infobox `Institutions` field of some of these physicists, you will see the breadth of workplaces corroborates this list.
###Code
topic_model.get_top_docs(n_docs=30, topic=13, sort_by='log_prob')
###Output
_____no_output_____
###Markdown
Below we see the great teachers and influencers of physics laureates, many of whom are laureates themselves. Likewise, if you take a look at the Wikipedia Infobox `Doctoral students` and `Other notable students` fields of some of these physicists, you will see the number of laureates they have had an impact on. Interestingly, the first paragraph of [Arnold Sommerfeld's Wikipedia article](https://en.wikipedia.org/wiki/Arnold_Sommerfeld) focuses on this aspect of his career and compares him to *J. J. Thomson*.
###Code
topic_model.get_top_docs(n_docs=30, topic=22, sort_by='log_prob')
###Output
_____no_output_____
###Markdown
Projecting Features to the Topic SpaceCorEx is a discriminative model which means that it estimates the probability a document (i.e. physicist) belongs to a topic given that document's words (i.e. features). The estimated probabilities of topics for each document can be obtained through the topic model's properties `log_p_y_given_x` or `p_y_given_x` or function `predict_proba`. A binary determination of which documents belong to each topic is obtained using a softmax and can be accessed through the topic model's `labels` property or function `transform` (or `predict`). We will now use the latter to reduce the dimensionality of the original binary features by projecting them into the latent space spanned by the binary topics of the topic model.
###Code
def project_features_to_topic_space(features, topic_model, columns=None):
"""Project the binary features to the latent space spanned by the binary
topics of the topic model.
Args:
features (pandas.DataFrame): Binary features dataframe.
topic_model (corextopic.CorEx): CorEx topic model.
topic_labels (list of `str`, optional): Defaults to None. Topic labels
to use as columns for the dataframe.
Returns:
pandas.DataFrame: Binary features dataframe containing the topics.
"""
X = ss.csr_matrix(features.values)
X_topics = topic_model.transform(X)
features_topics = pd.DataFrame(X_topics, index=features.index, columns=columns)
features_topics = features_topics.applymap(lambda x: 'yes' if x == True else 'no')
return features_topics
train_features_topics = project_features_to_topic_space(
train_features, topic_model, list(latent_factors.keys()))
train_features_topics.head()
validation_features_topics = project_features_to_topic_space(
validation_features, topic_model, list(latent_factors.keys()))
validation_features_topics.head()
test_features_topics = project_features_to_topic_space(
test_features, topic_model, list(latent_factors.keys()))
test_features_topics.head()
###Output
_____no_output_____
###Markdown
You may be wondering why we did not just use the estimated probabilities as the reduced dimension features. Most likely a model built from those features would be more accurate than one built from the binary features. Interpretability is the answer. For example, it does not make much sense to talk about the probability of a physicist being a *European Worker* or not. S/he is either a *European Worker* or not. It is more natural to say, for instance, that a physicist is a Nobel Laureate because s/he is a *European Worker*, a *North American Citizen* and a *Physics Laureate Teacher*, etc.The *European Alumni* and *Astronomer* topics are interesting as they both consist of only one feature. Therefore, you would expect a one-to-one correspondence between the labels in the topic and the label in the original feature. However, this is not always the case as the topic has actually "flipped" the label for some of the physicists. We are not exactly sure why it happens. Clearly it is a quirk of the topic modeling.
###Code
len(train_features) - (train_features_topics.is_eu_alumni == train_features.alumnus_in_EU.map(
{1: 'yes', 0:'no'})).sum()
len(train_features) - (train_features_topics.is_astronomer == train_features.is_astronomer.map(
{1: 'yes', 0:'no'})).sum()
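# The probabilities behind the binary labels (sketch), via the p_y_given_x
# property mentioned above; rows are physicists, columns are topics.
topic_probs = pd.DataFrame(topic_model.p_y_given_x,
                           index=train_features.index,
                           columns=list(latent_factors.keys()))
topic_probs.head()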
###Output
_____no_output_____
###Markdown
Persisting the DataNow we have the training, validation and test features dataframes in the topic model space, we will persist them for future use.
###Code
train_features_topics.to_csv('../data/processed/train-features-topics.csv')
validation_features_topics.to_csv('../data/processed/validation-features-topics.csv')
test_features_topics.to_csv('../data/processed/test-features-topics.csv')
###Output
_____no_output_____ |
Big_data_Assignment_6.ipynb | ###Markdown
Initialize the SparkSession
###Code
from pyspark.sql import SparkSession, Row
from pyspark.sql.functions import max, min, sum

spark = SparkSession.builder \
    .master("local") \
    .appName("Assignment 6") \
    .config("spark.some.config.option", "some-value") \
    .getOrCreate()
import os
os.getcwd()
###Output
_____no_output_____
###Markdown
Reading dataset
###Code
#laod data as RDD
sc=spark.sparkContext
rdd_d=sc.textFile('/content/sample_data/california_housing_test.csv')
rdd_d.take(10)
#load the data as dataframe
data=spark.read.csv('/content/sample_data/california_housing_test.csv',header=True)
data.show(10)
#remove the first row
f=rdd_d.first()
rdd_d=rdd_d.filter(lambda r:r!=f)
## convert rdd to dataframe
a=rdd_d.map(lambda x:x.split(","))
b=a.map(lambda p: Row(longitude=p[0],latitude=p[1],housing_median_age=p[2],total_rooms=p[3],total_bedrooms=p[4],population=p[5],households=p[6],median_income=p[7],median_house_value=p[8]))
rdd_df=spark.createDataFrame(b)
rdd_df.show(10)
# convert dataframe to rdd
rdd1=rdd_df.rdd
rdd1
# convert spark dataframe to pandas dataframe
pandas=data.toPandas()
pandas
###Output
_____no_output_____
###Markdown
Task
###Code
#Task 1 ---Select first 10 rows of dataset
rdd_df.show(10)
#Task 2---show the schema of the dataset
rdd_df.printSchema()
# Task 3---Group by and get max, min, count of a column in the dataset
rdd_df.groupBy('households').count().show()
rdd_df.select(max('population'),min('population')).show()
#Task 4---Filter your dataset by some conditions based on your column
rdd_df.filter(rdd_df['population']>500).show()
#Task 5---Apply group by with having clause
rdd_df.groupBy('median_income').agg(sum('households').alias('sum_households')).show()
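# A closer analogue of SQL HAVING (sketch): filter on the aggregated column.
# The threshold used here is arbitrary and purely illustrative.
rdd_df.groupBy('median_income').agg(sum('households').alias('sum_households')).filter('sum_households > 1000').show()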
#Task 6 ---Apply order by
rdd_df.select('*').orderBy('households').show()
#Task 7---Select distinct records by a column
rdd_df.select('housing_median_age').distinct().show()
#Task 8---Transform the data type of columns from int to string
from pyspark.sql.types import *
new_rdd_df=rdd_df.withColumn('households',rdd_df['households'].cast(StringType()))
new_rdd_df.printSchema()
###Output
_____no_output_____ |
notebooks/LazyGreedy.ipynb | ###Markdown
Lazy versus greedy evaluation
###Code
%matplotlib inline
%run notebook_setup.py
import starry
starry.config.quiet = True
###Output
_____no_output_____
###Markdown
tl;drVersion `1.0` of the code evaluates things *lazily* by default, meaning that all internal values are nodes in a graph, stored as `theano` tensors. Lazy mode is required for interfacing with `pymc3` to do inference (refer to the several tutorials on `pymc3` sampling). If you *really* need the value of a `theano` object, you can always call its `eval()` method, but keep in mind that operation can be somewhat slow.If, on the other hand, you're not interested in using `pymc3` or in any of the derivatives of `starry` models, you can disable lazy evaluation by typing```pythonstarry.config.lazy = False```at the top of your script, *before* you instantiate any `starry` maps. If you do that, `starry` will behave as it did in previous versions: you don't have to call the `eval()` method or worry about any tensor nonsense. Lazy mode One of the big changes in version `1.0` of `starry` is *lazy evaluation* mode, which is now the default. [Lazy evaluation](https://en.wikipedia.org/wiki/Lazy_evaluation) means that the evaluation of all expressions in the code is delayed until a numerical value is needed (i.e., when outputting or plotting the result). This is as opposed to [greedy or eager evaluation](https://en.wikipedia.org/wiki/Eager_evaluation), in which all expressions are evaluated on-the-fly, as soon as the code encounters them. In lazy evaluation mode, expressions are compiled and stored in memory as *nodes in a graph*, which are only executed when a numerical value is required. This strategy allows for some cool compile-time optimization under the hood. But by far the greatest advantage of lazy evaluation (at least in our case) is that it makes it easy to autodifferentiate expressions using backpropagation. This lets us compute derivatives of all expressions extremely efficiently, and those can be seemlessly integrated into derivative-based MCMC sampling schemes such as Hamiltonian Monte Carlo or NUTS.Version `1.0` of `starry` is built on top of the [theano](https://github.com/Theano/Theano) machine learning library, which handles all of the graph compiling and backpropagation. There's lots of other software that does similar things (such as `tensorflow` and `pytorch`), but the advantage of `theano` is that it is also the backbone of [exoplanet](https://github.com/dfm/exoplanet) and [pymc3](https://github.com/pymc-devs/pymc3). This allows us to easily integrate `starry` with all the cool inference machinery of those two packages.Let's look at some examples of how lazy evaluation works in `starry`. Let's instantiate a regular `starry` map:
###Code
import starry
map = starry.Map(ydeg=1)
###Output
_____no_output_____
###Markdown
We can give this map a simple dipole by assigning a value to the coefficient of the $Y_{1,0}$ spherical harmonic:
###Code
map[1, 0] = 0.5
###Output
_____no_output_____
###Markdown
Since the coefficient of the $Y_{0,0}$ harmonic is fixed at unity, our spherical harmonic coefficients are now the vector $y = (1, 0, \frac{1}{2}, 0)$. Here's what that looks like:
###Code
map.show()
###Output
_____no_output_____
###Markdown
Recall that the spherical harmonic coefficients are stored in the `y` attribute of the map. Let's take a look:
###Code
map.y
###Output
_____no_output_____
###Markdown
That doesn't look right, but it *is*: the vector $y$ is stored internally as a `theano` tensor and doesn't yet have a numerical value:
###Code
type(map.y)
###Output
_____no_output_____
###Markdown
In order to access its value, I can call its `eval` method:
###Code
map.y.eval()
###Output
_____no_output_____
###Markdown
Which is what we expected. A similar thing happens when we call a method such as `flux`:
###Code
map.flux(xo=0.4, yo=0.3, ro=0.1, theta=30)
map.flux(xo=0.4, yo=0.3, ro=0.1, theta=30).eval()
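# If the same expression is needed repeatedly, compiling it once with theano is
# much faster than repeated eval() calls (sketch; lazy mode already requires
# theano to be importable).
import theano
flux_fn = theano.function([], map.flux(xo=0.4, yo=0.3, ro=0.1, theta=30))
flux_fn()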
###Output
_____no_output_____
###Markdown
As we mentioned above, it's not generally a good idea to call the `eval()` method, since it can be quite slow. The whole point of lazy evaluation mode is so that `starry` can be easily integrated with `pymc3`. When building a `pymc3` model, `pymc3` handles all of the evaluations internally, so there's no need to call `eval()`. Within a `pymc3` model context, users can pass `pymc3` variables, `theano` variables, and/or `numpy` arrays to any `starry` method; casting is handled internally in all cases. Check out the tutorials on inference with `pymc3` for more information. If, on the other hand, you're not planning on integrating `starry` with `pymc3`, you should probably run it in greedy mode. See below. Greedy mode To run `starry` in greedy (i.e., not lazy) mode, you can add the following line somewhere near the top of your script:
###Code
# *-*-*- DON'T DO THIS AT HOME! -*-*-*
# You shouldn't mix greedy and lazy maps in
# the same session, as you risk angering theano.
# I'm able to get away with it in this example
# because I'm just evaluating a few variables.
# But if I were to try to do anything else, things
# would probably break!
starry.config._allow_changes = True
starry.config.lazy = False
###Output
_____no_output_____
###Markdown
(Note that if you try to change the evaluation mode after you've instantiated a `starry` map, the code will complain.)In greedy mode, things behave as they did in previous versions of the code. Check it out:
###Code
map = starry.Map(ydeg=1)
map[1, 0] = 0.5
map.y
type(map.y)
###Output
_____no_output_____
###Markdown
All methods are automatically compiled and return numerical outputs:
###Code
map.flux(xo=0.4, yo=0.3, ro=0.1, theta=30)
###Output
_____no_output_____ |
MLCourse/Outliers.ipynb | ###Markdown
Dealing with Outliers Sometimes outliers can mess up an analysis; you usually don't want a handful of data points to skew the overall results. Let's revisit our example of income data, with some random billionaire thrown in:
###Code
%matplotlib inline
import numpy as np
incomes = np.random.normal(27000, 15000, 10000)
incomes = np.append(incomes, [1000000000])
import matplotlib.pyplot as plt
plt.hist(incomes, 50)
plt.show()
###Output
_____no_output_____
###Markdown
That's not very helpful to look at. One billionaire ended up squeezing everybody else into a single line in my histogram. Plus it skewed my mean income significantly:
###Code
incomes.mean()
###Output
_____no_output_____
###Markdown
It's important to dig into what is causing your outliers, and understand where they are coming from. You also need to think about whether removing them is a valid thing to do, given the spirit of what it is you're trying to analyze. If I know I want to understand more about the incomes of "typical Americans", filtering out billionaires seems like a legitimate thing to do.Here's something a little more robust than filtering out billionaires - it filters out anything beyond two standard deviations of the median value in the data set:
###Code
def reject_outliers(data):
u = np.median(data)
s = np.std(data)
filtered = [e for e in data if (u - 2 * s < e < u + 2 * s)]
return filtered
filtered = reject_outliers(incomes)
plt.hist(filtered, 50)
plt.show()
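# An alternative robust filter (sketch): keep only the values between the 1st
# and 99th percentiles instead of using standard deviations from the median.
low, high = np.percentile(incomes, [1, 99])
filtered_pct = incomes[(incomes > low) & (incomes < high)]
plt.hist(filtered_pct, 50)
plt.show()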
###Output
_____no_output_____
###Markdown
That looks better. And, our mean is more, well, meaningful now as well:
###Code
np.mean(filtered)
###Output
_____no_output_____ |
RNN_predict.ipynb | ###Markdown
For short time horizons it works well, but for long time horizons it is not good enough...
###Code
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
import tensorflow as tf
import pandas as pd
tf.__version__
import matplotlib
import matplotlib.pyplot as plt
def reset_graph(seed=42):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
# data
train_dataset=pd.read_csv('Time_series_train',names='F')
test_dataset=pd.read_csv('Time_series_vali',names='F')
train_dataset=pd.DataFrame.to_numpy(train_dataset)
test_dataset=pd.DataFrame.to_numpy(test_dataset)
train_dataset
# prepare batch
t_min, t_max=0, 600
resolution=0.1
#batch_size=50 #
#n_steps=20 #
def next_batch(batch_size,n_steps,dataset):
sample_list=range(1,len(dataset)-n_steps)
from random import sample
train_sample=np.array(sample(sample_list,batch_size)).reshape(batch_size,1)
index=train_sample+np.arange(0, n_steps + 1)
ys=dataset[index]
return ys[:, :-1].reshape(-1, n_steps, 1), ys[:, 1:].reshape(-1, n_steps, 1)
n_steps=20
t_instance = np.linspace(0, 0 + resolution * (n_steps + 1), n_steps + 1)
x_batch,y_batch=next_batch(1,n_steps,train_dataset)
t_instance = np.linspace(0, 0 + resolution * (n_steps + 1), n_steps + 1)
plt.title("Testing the model", fontsize=14)
plt.plot(t_instance[:-1], x_batch.reshape(n_steps), "bo", markersize=10, label="X_batch")
plt.plot(t_instance[1:], y_batch.reshape(n_steps), "r*", markersize=10, label="y_batch")
plt.legend(loc="upper left")
plt.xlabel("Time")
plt.show()
t_min, t_max = 0, 300
resolution = 0.1
def time_series(t):
return t * np.sin(t) / 3 + 2 * np.sin(t*5)
def next_batch(batch_size, n_steps,train):
t0 = np.random.rand(batch_size, 1) * (t_max - t_min - n_steps * resolution)
Ts = t0 + np.arange(0., n_steps + 1) * resolution
ys = time_series(Ts)
return ys[:, :-1].reshape(-1, n_steps, 1), ys[:, 1:].reshape(-1, n_steps, 1)
X_batch,y_batch=next_batch(1,20,train_dataset)
np.c_[X_batch, y_batch]
reset_graph()
n_steps =20
n_inputs = 1
n_neurons = 200
n_outputs = 1
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])
cell = tf.contrib.rnn.OutputProjectionWrapper(
tf.nn.rnn_cell.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu),
output_size=n_outputs)
outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
#%%
learning_rate = 0.001
loss = tf.reduce_mean(tf.square(outputs - y)) # MSE
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
#%%
saver = tf.train.Saver()
#%%
n_iterations = 2000
batch_size = 30
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
X_batch, y_batch = next_batch(batch_size, n_steps,train_dataset)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
if iteration % 100 == 0:
mse = loss.eval(feed_dict={X: X_batch, y: y_batch})
print(iteration, "\tMSE:", mse)
saver.save(sess, "./my_time_series_model") # not shown in the book
#%%
t_instance = np.linspace(0, 0 + resolution * (n_steps + 1), n_steps + 1)
X_new,y_new=next_batch(1,n_steps ,test_dataset)
X_new
with tf.Session() as sess: # not shown in the book
saver.restore(sess, "./my_time_series_model") # not shown
y_pred = sess.run(outputs, feed_dict={X: X_new})
#%%
y_pred.reshape(n_steps)
plt.title("Testing the model", fontsize=14)
plt.plot(t_instance[:-1], X_new.reshape(n_steps), "bo", markersize=10, label="instance")
plt.plot(t_instance[1:], y_new.reshape(n_steps), "r*", markersize=10, label="target")
plt.plot(t_instance[1:], y_pred[0,:,0], "g.", markersize=10, label="prediction")
plt.legend(loc="upper left")
plt.xlabel("Time")
plt.show()
###Output
_____no_output_____
###Markdown
**Multi layer LSTM cell**
###Code
n_steps = 50
n_inputs = 1
n_neurons = 250
n_outputs = 1
n_layers = 4
n_iterations=1000
learning_rate = 0.001
X_batch,y_batch=next_batch(1,n_steps,train_dataset)
reset_graph()
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=n_neurons)
#!
keep_prob = tf.placeholder_with_default(1.0, shape=())
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
lstm_cells = [tf.nn.rnn_cell.BasicLSTMCell(num_units=n_neurons)
for layer in range(n_layers)]
multi_cell = tf.nn.rnn_cell.MultiRNNCell(lstm_cells)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_cell, X, dtype=tf.float32)
learning_rate = 0.001
stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])
stacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)
outputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])
loss = tf.reduce_mean(tf.square(outputs - y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_iterations = 1000
batch_size = 20
n_epochs = 5
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for iteration in range(n_iterations):
X_batch, y_batch = next_batch(batch_size, n_steps,train_dataset)
_, mse = sess.run([training_op, loss],
feed_dict={X: X_batch, y: y_batch})
if iteration % 100 == 0: # not shown in the book
print(iteration, "Training MSE:", mse) # not shown
saver.save(sess, "./my_dropout_time_series_model")
t_instance = np.linspace(0, 0 + resolution * (n_steps + 1), n_steps + 1)
X_new,y_new=next_batch(1,n_steps ,test_dataset)
X_new
with tf.Session() as sess: # not shown in the book
saver.restore(sess, "./my_dropout_time_series_model") # not shown
y_pred = sess.run(outputs, feed_dict={X: X_new})
plt.title("Testing the model", fontsize=14)
plt.plot(t_instance[:-1], X_new.reshape(n_steps), "bo", markersize=10, label="instance")
plt.plot(t_instance[1:], y_new.reshape(n_steps), "y*", markersize=10, label="target")
plt.plot(t_instance[1:], y_pred[0,:,0], "r.", markersize=10, label="prediction")
plt.legend(loc="upper left")
plt.xlabel("Time")
plt.show()
number=20
sequence=train_dataset[0:n_steps].reshape(n_steps)
sequence=sequence.tolist()
with tf.Session() as sess:
saver.restore(sess, "./my_dropout_time_series_model")
for iteration in range(number):
X_batch = np.array(sequence[-n_steps:]).reshape(1, n_steps, 1)
y_pred = sess.run(outputs, feed_dict={X: X_batch})
sequence.append(y_pred[0, -1, 0])
X_batch = np.array(sequence[-n_steps:]).reshape(1, n_steps, 1)
X_batch
plt.title("Prediction", fontsize=14)
plt.plot( np.array(sequence[0:n_steps]).reshape(n_steps), "ro", markersize=10, label="start")
plt.plot( np.array(sequence).reshape(number+n_steps), "b.", markersize=10, label="true")
plt.plot(train_dataset[0:number+n_steps].reshape(number+n_steps),"y*", markersize=10, label="prediction")
plt.legend(loc="upper left")
sequence
###Output
_____no_output_____ |
Assignments/GEOS518-Assignment1-AutocorrelationFunction.ipynb | ###Markdown
Jupyter Notebook Assignment Number 1 Due: 07 February 2018**Instructions:** In this Jupyter notebook, you will should perform the following tasks with your chosen dataset:1. Import it and create a plot2. Compute and show key descriptive statistics about the time series that might include the mean, variance, and/or histograms3. Assess the stationarity of the time series in one or both of the following ways: (1) compute and report the slope of a regression line through time (and it's significance), (2) compute and report the [Augmented Dickey-Fuller test](https://machinelearningmastery.com/time-series-data-stationary-python/)4. Compute and plot the autocorrelation function of the time series. Note that if your examination reveals that the time series likely has a significant trend through time, you should either attempt to detrend it, or discuss the ramifications of not doing so at length.5. Discuss in detail the results of your analyses in terms of the physical reasons that you observe the trends you see. For example, can you explain why the autocorrelation function behaves as you observe it to based on physical intuition?Characteristics of exemplary work:* You use available libraries and (in comments) justify their use* Your code is well commented and you describe each step that you are doing in your code* Your plots are adequately sized. Axes, plots and legends labeled with font sizes that are readable. The marker and line styles and sizes are appropriate* You use Markdown cells to describe, in detail what each code cell is doing. Markdown cells and code cells are adequately* Your notebook response contains a Markdown cell that provides an overview of the problem statement, your approach, and key findings (i.e., these three things should correspond to sections)
###Code
# Libraries that might be helpful. Note, ignore any errors regarding pandas.core.datatools being deprecated
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.tsa.api as sm
###Output
_____no_output_____ |
src/lab3_nvp_v2.ipynb | ###Markdown
Lab exercise: Real NVP
###Code
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
import itertools
import random
import math
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
%matplotlib inline
pltparams = {
'legend.fontsize': 'x-large',
'axes.labelsize': 'x-large',
'axes.titlesize': 'x-large',
'xtick.labelsize': 'x-large',
'ytick.labelsize': 'x-large',
'figure.titlesize': 'x-large',
'savefig.dpi': 300,
}
plt.rcParams.update(pltparams)
# sns.set(font_scale = 1.2)
# samples1-2 shape must be (n samples, 2)
def plot_samples(samples1, samples2=None):
fig, ax = plt.subplots()
ax.scatter(samples1[:,0], samples1[:,1], marker="x", color="blue")
if samples2 is not None:
ax.scatter(samples2[:,0], samples2[:,1], marker="x", color="red")
return fig
import sklearn.datasets
target_samples, target_classes = sklearn.datasets.make_moons(1000, noise=0.1)
target_samples = torch.from_numpy(target_samples).float()
fig = plot_samples(target_samples)
###Output
_____no_output_____
###Markdown
Forward from $z$ to $x$:\begin{align*}g &= z * \exp(s(z * \text{mask})) + t(z * \text{mask})\\x &= z * \text{mask} + g * (1 - \text{mask})\end{align*}Inverse from $x$ to $z$:\begin{align*}g^{-1} &= (x - t(x * \text{mask})) * \exp(-s(x * \text{mask}))\\z &= x * \text{mask} + g^{-1} * (1 - \text{mask})\end{align*}
###Code
class RealNVPLayer(nn.Module):
def __init__(self, size, reverse=False):
super().__init__()
self.mask = torch.zeros(size, requires_grad=False)
mid = int(size / 2)
self.mid = mid
if reverse:
self.mask[mid:] = 1.
else:
self.mask[:mid] = 1.
## the two operations
self.scale = nn.Sequential(
nn.Linear(size, 10),
nn.Tanh(),
nn.Linear(10, size),
)
self.translate = nn.Sequential(
nn.Linear(size, 10),
nn.Tanh(),
nn.Linear(10, size),
)
# project from the latent space to the observed space,
# i.e. x = g(z)
def forward(self, z):
n_mask = 1. - self.mask
z_masked = z * self.mask
transform = z * torch.exp(self.scale(z_masked)) + self.translate(z_masked)
x = z_masked + transform * n_mask
return x
# project from the observed space to the latent space,
# this function also return the log det jacobian of this inv function
def inv(self, x):
n_mask = 1. - self.mask
x_masked = x * self.mask
scaled = self.scale(x_masked)
reversetransform = (x - self.translate(x_masked)) * torch.exp(-scaled)
z = x_masked + reversetransform * n_mask
log_det_jacobian = torch.sum(-scaled * n_mask, dim=-1)
return z, log_det_jacobian
# Test!
layer = RealNVPLayer(2, reverse=False)
with torch.no_grad():
x = torch.rand(1, 2)
z, _ = layer.inv(x)
xx = layer(z)
print("In the 3 vectors below, the first element must be equal")
print("This two vectors should be equal:")
print(x)
print(xx)
print("This vector should be different to the two above")
print(z)
print()
layer = RealNVPLayer(2, reverse=True)
with torch.no_grad():
x = torch.rand(1, 2)
z, _ = layer.inv(x)
xx = layer(z)
print("In the 3 vectors below, the second element must be equal")
print("This two vectors should be equal:")
print(x)
print(xx)
print("This vector should be different to the two above")
print(z)
class RealNVP(nn.Module):
def __init__(self, size, n_layers):
super().__init__()
self.prior = torch.distributions.normal.Normal(torch.zeros(2), torch.ones(2))
self.layers = nn.ModuleList(
RealNVPLayer(size, i % 2 == 0)
for i in range(n_layers)
)
def forward(self, z):
x = z
for i in range(len(self.layers)):
x = self.layers[i](x)
return x
def inv(self, x):
log_det_jacobian = 0.
z = x
for i in reversed(range(len(self.layers))):
z, j = self.layers[i].inv(z)
# remember here, we just have to sum all log det jacobians!
log_det_jacobian = log_det_jacobian + j
return z, log_det_jacobian
def sample(self, n_samples):
z = self.prior.sample((n_samples,))
x = self(z)
return x
def log_prior(self, x):
z, det = self.inv(x)
ret = self.prior.log_prob(z).sum(1) + det
return ret
trained_distrib = RealNVP(2, 50)
optimizer = torch.optim.Adam(trained_distrib.parameters(), lr=1e-3)
batch_size = 1000
losses = list()
for _ in range(500):
for i in range(0, target_samples.shape[0], batch_size):
batch = target_samples[i:i+batch_size]
optimizer.zero_grad()
loss = -trained_distrib.log_prior(batch).mean()
losses.append(loss.item())
loss.backward()
torch.nn.utils.clip_grad_norm_(trained_distrib.parameters(), 5)
optimizer.step()
plt.plot(np.arange(len(losses)), losses)
# sample from the model
with torch.no_grad():
samples = trained_distrib.sample(1000)
fig = plot_samples(target_samples, samples)
fig.savefig("sample.pdf")
# print the latent space corresponding to each half moon in a different color
with torch.no_grad():
source_sample1, _ = trained_distrib.inv(target_samples[target_classes == 0])
source_sample2, _ = trained_distrib.inv(target_samples[target_classes == 1])
fig = plot_samples(source_sample1, source_sample2)
fig.savefig("latent.pdf")
###Output
_____no_output_____ |
Fairoza/Module-03/Dictionaries.ipynb | ###Markdown
Solution for Dictionaries Exercises 1. Create a dictionary which contains 'english spelling' for keys and 'number' in integer as values from 1 to 10 in random orders. For example, one: 1, two: 2. Set the dictionary as variable 'numbers'.
###Code
numbers = {'one': 1,
'three': 3,
'seven': 7,
'two': 2,
'four': 4,
'eight': 8,
'ten': 10,
'five': 5,
'six': 6,
'nine': 9}
print(numbers)
###Output
{'one': 1, 'three': 3, 'seven': 7, 'two': 2, 'four': 4, 'eight': 8, 'ten': 10, 'five': 5, 'six': 6, 'nine': 9}
###Markdown
2. What is the second key of *numbers*?
###Code
# Keys are strings, so numbers[1] would raise a KeyError; index the key list instead.
print(list(numbers.keys())[1])
numbers["three"]
###Output
_____no_output_____
###Markdown
3. Find the number of unique keys in *numbers*.
###Code
numbers_2 = {'one': 1,
'one': 1,
'three': 3,
'seven': 7,
'two': 2,
'four': 4,
'eight': 8,
'ten': 10,
'five': 5,
'six': 6,
'nine': 9}
print(numbers_2)
print(len(numbers_2))  # duplicate keys in the literal are collapsed automatically
num_keys_2 = len(set(numbers_2))
print(num_keys_2)
###Output
_____no_output_____
###Markdown
4. Find whether 'eleven' is a key in *numbers*.
###Code
contains_eleven = numbers.get('eleven')
print(contains_eleven)
contains_eleven = "eleven" in numbers
print(contains_eleven)
###Output
False
###Markdown
5. Set a new key and value pair of 'eleven': 11
###Code
numbers['eleven'] = 11
print(numbers)
###Output
{'one': 1, 'three': 3, 'seven': 7, 'two': 2, 'four': 4, 'eight': 8, 'ten': 10, 'five': 5, 'six': 6, 'nine': 9, 'eleven': 11}
###Markdown
6. Create and sort a list of keys in *numbers*
###Code
sorted_keys = sorted(numbers.keys())
print(sorted_keys)
###Output
['eight', 'eleven', 'five', 'four', 'nine', 'one', 'seven', 'six', 'ten', 'three', 'two']
###Markdown
7. Get the first element in the sorted list of keys
###Code
print(sorted_keys[0])
###Output
eight
###Markdown
8. Find the highest value in the list of keys
###Code
print(sorted_keys[-1])
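# If "highest" were meant numerically rather than alphabetically, the key with
# the largest value could be found like this (illustrative):
print(max(numbers, key=numbers.get))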
###Output
two
###Markdown
9. Create and sort a list of values in *numbers*
###Code
sorted_values = sorted(numbers.values())
print(sorted_values)
###Output
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
###Markdown
10. Get the first element in the sorted list of values.
###Code
print(sorted_values[0])
###Output
1
###Markdown
11. Access the value of 'nine'
###Code
numbers['nine']
###Output
_____no_output_____ |
CXRlabeler/withoutLM-mimic-cxr.ipynb | ###Markdown
Classification Model MIMIC-CXR Dataset
###Code
from fastai.basics import *
from fastai.text.all import *
import warnings
warnings.filterwarnings('ignore')
# Read in the train and test sets.
path = Path('/home/jupyter/data/mimic-cxr')
df_lm = pd.read_csv(path/"lm.csv")
df_cl = pd.read_csv(path/"labels.csv")
df_train = pd.read_csv(path/"train.csv")
df_test = pd.read_csv(path/"test.csv")
###Output
_____no_output_____
###Markdown
Multi-Label Classifier
###Code
# fix result
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
SEED = 42
seed_everything(SEED)
###Output
_____no_output_____
###Markdown
1. Data Block
###Code
labels = ["Atelectasis", "Cardiomegaly", "Consolidation",
"Edema", "Enlarged Cardiomediastinum", "Fracture", "Lung Lesion",
"Lung Opacity", "No Finding", "Pleural Effusion", "Pleural Other",
"Pneumonia", "Pneumothorax", "Support Devices"]
bs_cl = (TextBlock.from_df('reports'),
MultiCategoryBlock(encoded=True, vocab=labels))
db_cl = DataBlock(blocks=bs_cl,
get_x=ColReader('text'),
get_y=ColReader(labels),
splitter=ColSplitter('is_valid'))
db_cl.summary(df_cl.iloc[:100])
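# Next step (sketch, assuming the standard fastai text API; batch size and
# metric choice here are illustrative): build the DataLoaders and a multi-label
# text classifier from the DataBlock above.
dls_cl = db_cl.dataloaders(df_cl, bs=64)
learn = text_classifier_learner(dls_cl, AWD_LSTM, metrics=accuracy_multi)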
###Output
Setting-up type transforms pipelines
Collecting items from dicom_id \
0 02aa804e-bde0afdd-112c0b34-7bc16630-4e384014
1 2a2277a9-b0ded155-c0de8eb9-c124d10e-82c5caab
2 68b5c4b1-227d0485-9cc38c3f-7b84ab51-4b472714
3 096052b7-d256dc40-453a102b-fa7d01c6-1b22c6b4
4 8959e402-2175d68d-edba5a6c-baab51c3-9359f700
.. ...
95 325f2526-1ea870c1-06d8ff34-1b02764d-9e336cbc
96 38a433f3-1d000dff-a774352f-35c0d838-353e023f
97 4a25692b-e596ad27-5bc2eba3-e518093c-623f4d6a
98 0d24804d-197942ca-7f32a773-b93ba943-40022beb
99 a664e3c4-97f37598-e008ddb5-674d8b24-8a49114f
reports \
0 No acute cardiopulmonary process.
1 No acute cardiopulmonary abnormality.
2 No acute intrathoracic process.
3 Focal consolidation at the left lung base, possibly representing aspiration or\n pneumonia.\n \n Central vascular engorgement.
4 No evidence of acute cardiopulmonary process.
.. ...
95 Frontal and lateral views of the chest were obtained. Left basilar\n atelectasis is seen. There is left basilar and left mid lung\n atelectasis/scarring. Chain sutures are noted overlying the right\n upper-to-mid hemithorax. There is subtle focal patchy opacity projecting over\n the right lateral lower chest, which in the same location on the lateral view,\n appeared to be a linear opacity dating back to ___. Finding could\n represent atelectasis/scarring; however, on the current study, it appears more\n amorphous and a small focus of infection is not excluded. The cardiac and\n medi...
96 No pneumonia.
97 1. The left subclavian PICC line now has its tip in the distal SVC. Overall,\n cardiac and mediastinal contours are likely unchanged given differences in\n positioning. There is increased prominence of the pulmonary vasculature and\n indistinctness in the perihilar region consistent with interval appearance of\n mild interstitial and perihilar edema. No pleural effusions. No\n pneumothorax. Surgical chain sutures are again seen in the right upper lobe\n consistent with prior surgery. This is some fullness to the right suprahilar\n region which is unchanged and likely corresponds to ...
98 Fullness in the right lower paratracheal region of the mediastinum is\n comparable to the appearance in ___ when a chest CT scan showed no\n appreciable adenopathy in the mediastinum, instead a distended azygos vein. \n There was adenopathy in the adjacent right hilus, and the appearance of that\n structure is stable over these 3 examinations. Aside from small areas of\n linear scarring, lungs are clear. There is no edema or pneumonia and no\n appreciable pleural effusion. Heart size is normal.
99 As compared to the previous radiograph, the lung volumes have slightly\n decreased. There is minimal fluid overload in both the vascular and\n interstitial compartment. Normal size of the cardiac silhouette. Moderate\n tortuosity of the thoracic aorta. No pleural effusions. No pneumonia.
Atelectasis Cardiomegaly Consolidation Edema \
0 0 0 0 0
1 0 0 0 0
2 0 0 0 0
3 0 0 1 0
4 0 0 0 0
.. ... ... ... ...
95 1 0 0 0
96 0 0 0 0
97 0 0 1 1
98 0 0 0 0
99 0 0 0 0
Enlarged Cardiomediastinum Fracture Lung Lesion Lung Opacity \
0 0 0 0 0
1 0 0 0 0
2 0 0 0 0
3 0 0 0 0
4 0 0 0 0
.. ... ... ... ...
95 0 0 0 1
96 0 0 0 0
97 0 0 0 0
98 1 0 0 0
99 0 0 0 0
No Finding Pleural Effusion Pleural Other Pneumonia Pneumothorax \
0 1 0 0 0 0
1 1 0 0 0 0
2 1 0 0 0 0
3 0 0 0 0 0
4 1 0 0 0 0
.. ... ... ... ... ...
95 0 0 0 1 0
96 1 0 0 0 0
97 0 0 0 0 0
98 0 0 0 0 0
99 1 0 0 0 0
Support Devices is_valid
0 0 False
1 0 False
2 0 False
3 0 False
4 0 False
.. ... ...
95 0 False
96 0 False
97 1 False
98 0 False
99 0 False
[100 rows x 17 columns]
Found 100 items
2 datasets of sizes 100,0
Setting up Pipeline: ColReader -- {'cols': 'text', 'pref': '', 'suff': '', 'label_delim': None} -> Tokenizer -> Numericalize
###Markdown
2. Data Loader
###Code
dl_cl = db_cl.dataloaders(df_cl)
dl_cl.show_batch()
###Output
_____no_output_____
###Markdown
3. Training
###Code
text_classifier_learner??
loss_func = BCEWithLogitsLossFlat(thresh=0.8)
metrics = [partial(accuracy_multi, thresh=0.8),
F1ScoreMulti(average='macro'),
           PrecisionMulti(average='macro'),
           RecallMulti(average='macro'),
           RocAucMulti(average='macro')]
learn_cl = text_classifier_learner(dl_cl, AWD_LSTM, metrics=metrics, loss_func=loss_func)
learn_cl.to_fp16()
learn_cl.fine_tune(10)
###Output
_____no_output_____ |
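###Markdown
 After fine-tuning, the multi-label predictions can be inspected by thresholding the sigmoid outputs; a minimal sketch, assuming fastai's default behaviour of applying the loss activation in get_preds (the 0.8 threshold mirrors the one used for the loss and metrics above):
###Code
# probabilities and targets for the validation split
probs, targs = learn_cl.get_preds()
# apply the same 0.8 threshold used by BCEWithLogitsLossFlat / accuracy_multi above
preds = (probs > 0.8).float()
# how many validation reports were predicted positive for each label
for name, count in zip(labels, preds.sum(dim=0).tolist()):
    print(name, int(count))
###Output
_____no_output_____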
1601_bounding_box_regression.ipynb | ###Markdown
 Object detection: Bounding box regression with Keras, TensorFlow, and Deep Learning by [PyImageSearch.com](http://www.pyimagesearch.com) Welcome to **[PyImageSearch Plus](http://pyimg.co/plus)** Jupyter Notebooks! This notebook is associated with the [Object detection: Bounding box regression with Keras, TensorFlow, and Deep Learning](http://pyimg.co/gk8s6) blog post published on 10-05-20. Only the code for the blog post is here. Most codeblocks have a 1:1 relationship with what you find in the blog post with two exceptions: (1) Python classes are not separate files as they are typically organized with PyImageSearch projects, and (2) Command Line Argument parsing is replaced with an `args` dictionary that you can manipulate as needed. We recommend that you execute (press ▶️) the code block-by-block, as-is, before adjusting parameters and `args` inputs. Once you've verified that the code is working, you are welcome to hack with it and learn from manipulating inputs, settings, and parameters. For more information on using Jupyter and Colab, please refer to these resources: * [Jupyter Notebook User Interface](https://jupyter-notebook.readthedocs.io/en/stable/notebook.htmlnotebook-user-interface) * [Overview of Google Colaboratory Features](https://colab.research.google.com/notebooks/basic_features_overview.ipynb) As a reminder, these PyImageSearch Plus Jupyter Notebooks are not for sharing; please refer to the **Copyright** directly below and **Code License Agreement** in the last cell of this notebook. Happy hacking! *Adrian* ***Copyright:*** *The contents of this Jupyter Notebook, unless otherwise indicated, are Copyright 2020 Adrian Rosebrock, PyimageSearch.com. All rights reserved. Content like this is made possible by the time invested by the authors. If you received this Jupyter Notebook and did not purchase it, please consider making future content possible joining PyImageSearch Plus at [http://pyimg.co/plus/](http://pyimg.co/plus) today.* Install the necessary packages
###Code
!pip install tensorflow==2.2.0
###Output
_____no_output_____
###Markdown
Download the code zip file
###Code
!wget https://s3-us-west-2.amazonaws.com/static.pyimagesearch.com/bounding-box-regression/bounding-box-regression.zip
!unzip -qq bounding-box-regression.zip
%cd bounding-box-regression
###Output
_____no_output_____
###Markdown
Blog Post Code Import Packages
###Code
# import the necessary packages
from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.models import load_model
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import mimetypes
import argparse
import imutils
import cv2
import os
###Output
_____no_output_____
###Markdown
Function to display images in Jupyter Notebooks and Google Colab
###Code
def plt_imshow(title, image):
# convert the image frame BGR to RGB color space and display it
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
plt.title(title)
plt.grid(False)
plt.show()
###Output
_____no_output_____
###Markdown
Define our `Config` class
###Code
class Config:
# define the base path to the input dataset and then use it to derive
# the path to the images directory and annotation CSV file
BASE_PATH = "dataset"
IMAGES_PATH = os.path.sep.join([BASE_PATH, "images"])
ANNOTS_PATH = os.path.sep.join([BASE_PATH, "airplanes.csv"])
# define the path to the base output directory
BASE_OUTPUT = "output"
# define the path to the output serialized model, model training plot,
# and testing image filenames
MODEL_PATH = os.path.sep.join([BASE_OUTPUT, "detector.h5"])
PLOT_PATH = os.path.sep.join([BASE_OUTPUT, "plot.png"])
TEST_FILENAMES = os.path.sep.join([BASE_OUTPUT, "test_images.txt"])
# initialize our initial learning rate, number of epochs to train
# for, and the batch size
INIT_LR = 1e-4
NUM_EPOCHS = 25
BATCH_SIZE = 32
# instantiate the config class
config = Config()
###Output
_____no_output_____
###Markdown
Implementing our bounding box regression training script with Keras and TensorFlow
###Code
# load the contents of the CSV annotations file
print("[INFO] loading dataset...")
rows = open(config.ANNOTS_PATH).read().strip().split("\n")
# initialize the list of data (images), our target output predictions
# (bounding box coordinates), along with the filenames of the
# individual images
data = []
targets = []
filenames = []
# loop over the rows
for row in rows:
# break the row into the filename and bounding box coordinates
row = row.split(",")
(filename, startX, startY, endX, endY) = row
# derive the path to the input image, load the image (in OpenCV
# format), and grab its dimensions
imagePath = os.path.sep.join([config.IMAGES_PATH, filename])
image = cv2.imread(imagePath)
(h, w) = image.shape[:2]
# scale the bounding box coordinates relative to the spatial
# dimensions of the input image
startX = float(startX) / w
startY = float(startY) / h
endX = float(endX) / w
endY = float(endY) / h
# load the image and preprocess it
image = load_img(imagePath, target_size=(224, 224))
image = img_to_array(image)
# update our list of data, targets, and filenames
data.append(image)
targets.append((startX, startY, endX, endY))
filenames.append(filename)
# convert the data and targets to NumPy arrays, scaling the input
# pixel intensities from the range [0, 255] to [0, 1]
data = np.array(data, dtype="float32") / 255.0
targets = np.array(targets, dtype="float32")
# partition the data into training and testing splits using 90% of
# the data for training and the remaining 10% for testing
split = train_test_split(data, targets, filenames, test_size=0.10,
random_state=42)
# unpack the data split
(trainImages, testImages) = split[:2]
(trainTargets, testTargets) = split[2:4]
(trainFilenames, testFilenames) = split[4:]
# write the testing filenames to disk so that we can use them
# when evaluating/testing our bounding box regressor
print("[INFO] saving testing filenames...")
f = open(config.TEST_FILENAMES, "w")
f.write("\n".join(testFilenames))
f.close()
# load the VGG16 network, ensuring the head FC layers are left off
vgg = VGG16(weights="imagenet", include_top=False,
input_tensor=Input(shape=(224, 224, 3)))
# freeze all VGG layers so they will *not* be updated during the
# training process
vgg.trainable = False
# flatten the max-pooling output of VGG
flatten = vgg.output
flatten = Flatten()(flatten)
# construct a fully-connected layer header to output the predicted
# bounding box coordinates
bboxHead = Dense(128, activation="relu")(flatten)
bboxHead = Dense(64, activation="relu")(bboxHead)
bboxHead = Dense(32, activation="relu")(bboxHead)
bboxHead = Dense(4, activation="sigmoid")(bboxHead)
# construct the model we will fine-tune for bounding box regression
model = Model(inputs=vgg.input, outputs=bboxHead)
# initialize the optimizer, compile the model, and show the model
# summary
opt = Adam(lr=config.INIT_LR)
model.compile(loss="mse", optimizer=opt)
print(model.summary())
# train the network for bounding box regression
print("[INFO] training bounding box regressor...")
H = model.fit(
trainImages, trainTargets,
validation_data=(testImages, testTargets),
batch_size=config.BATCH_SIZE,
epochs=config.NUM_EPOCHS,
verbose=1)
# serialize the model to disk
print("[INFO] saving object detector model...")
model.save(config.MODEL_PATH, save_format="h5")
# plot the model training history
N = config.NUM_EPOCHS
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.title("Bounding Box Regression Loss on Training Set")
plt.xlabel("Epoch #")
plt.ylabel("Loss")
plt.legend(loc="lower left")
plt.show()
###Output
_____no_output_____
###Markdown
Implementing our bounding box predictor with Keras and TensorFlow
###Code
# # construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-i", "--input", required=True,
# help="path to input image/text file of image filenames")
# args = vars(ap.parse_args())
# since we are using Jupyter Notebooks we can replace our argument
# parsing code with *hard coded* arguments and values
args = {
"input": "output/test_images.txt"
}
# determine the input file type, but assume that we're working with
# single input image
filetype = mimetypes.guess_type(args["input"])[0]
imagePaths = [args["input"]]
# if the file type is a text file, then we need to process *multiple*
# images
if "text/plain" == filetype:
# load the filenames in our testing file and initialize our list
# of image paths
filenames = open(args["input"]).read().strip().split("\n")
imagePaths = []
# loop over the filenames
for f in filenames:
# construct the full path to the image filename and then
# update our image paths list
p = os.path.sep.join([config.IMAGES_PATH, f])
imagePaths.append(p)
# load our trained bounding box regressor from disk
print("[INFO] loading object detector...")
model = load_model(config.MODEL_PATH)
# loop over the images that we'll be testing using our bounding box
# regression model
for imagePath in imagePaths:
# load the input image (in Keras format) from disk and preprocess
# it, scaling the pixel intensities to the range [0, 1]
image = load_img(imagePath, target_size=(224, 224))
image = img_to_array(image) / 255.0
image = np.expand_dims(image, axis=0)
# make bounding box predictions on the input image
preds = model.predict(image)[0]
(startX, startY, endX, endY) = preds
# load the input image (in OpenCV format), resize it such that it
# fits on our screen, and grab its dimensions
image = cv2.imread(imagePath)
image = imutils.resize(image, width=600)
(h, w) = image.shape[:2]
# scale the predicted bounding box coordinates based on the image
# dimensions
startX = int(startX * w)
startY = int(startY * h)
endX = int(endX * w)
endY = int(endY * h)
# draw the predicted bounding box on the image
cv2.rectangle(image, (startX, startY), (endX, endY),
(0, 255, 0), 2)
# show the output image
plt_imshow("Output", image)
###Output
_____no_output_____ |
.ipynb_checkpoints/(200819) 3.-checkpoint.ipynb | ###Markdown
 ■ Review: building a neural network with TensorFlow
 1. TensorFlow 1.x basic syntax: graph-construction region -------------- graph-execution region
 2. A single-layer neural network with TensorFlow
 3. A multi-layer neural network with TensorFlow
 4. A CNN-based neural network with TensorFlow
 The MNIST data could be loaded with nothing but the load_mnist function, but when you build a neural network on a real project you have to write a loader like load_mnist yourself, so we write the four functions that feed data into the network: 1. image_load 2. label_load 3. next_batch 4. shuffle_batch
 ■ Write the four functions that can load the leaf data
 1. Check the diseased-leaf photos: rename every photo to a number (1 ~ 10000) with the darknamer.exe program, put 1-9500 into the train folder and 9500-10000 into the test folder.
 2. The place where CNNs are applied most in industry: classifying defective products coming out of a factory --> quality control.
 3. Resize the 256*256 images to 32*32 in one pass: D:/data/leafs/images/train, D:/data/leafs/images/test (resize both the train and the test data).
###Code
import cv2
import os
import numpy as np
path = "D:/data/leafs/images/train"
file_list = os.listdir(path)
for k in file_list:
img = cv2.imread(path + '/' + k)
width, height = img.shape[:2]
resize_img = cv2.resize(img, (32 , 32), interpolation=cv2.INTER_CUBIC)
cv2.imwrite('D:/data/leafs/images/train_resize/' + k, resize_img)
import cv2
import os
import numpy as np
path = "D:/data/leafs/images/test"
file_list = os.listdir(path)
for k in file_list:
img = cv2.imread(path + '/' + k)
width, height = img.shape[:2]
resize_img = cv2.resize(img, (32 , 32), interpolation=cv2.INTER_CUBIC)
cv2.imwrite('D:/data/leafs/images/test_resize/' + k, resize_img)
###Output
_____no_output_____
###Markdown
 Healthy leaves : train 1 ~ 9500. Diseased leaves : train 9501 ~ 19000. Test: 1 ~ 500 and 501 ~ 1000. 4. Create the labels for the training data and the labels for the test data as CSV files.
###Code
path = 'd:/data/leafs/images/train_label.csv'
file = open(path, 'w')
for _ in range(0, 9500):
file.write(str(1) + '\n')
for _ in range(0, 9500):
file.write(str(0) + '\n')
file.close()
path = 'd:/data/leafs/images/test_label.csv'
file = open(path, 'w')
for _ in range(0, 500):
file.write(str(1) + '\n')
for _ in range(0, 500):
file.write(str(0) + '\n')
file.close()
###Output
_____no_output_____
###Markdown
 5. Copy the contents of loader2.py, which holds the four functions written to load the cifar10 data into a network, into a new file called loader_leaf.py, and then modify it so that it can load the leaf data: image_load, label_load, next_batch, shuffle_batch. 6. Check that the leaf data loads correctly.
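 A minimal sketch of what loader_leaf.py could contain is below. The details are assumptions inferred from the shapes printed in the verification cell (32x32x3 images, two-class one-hot labels), not the original loader2.py code.
###Code
# loader_leaf.py (sketch, not the original file)
import os
import cv2
import numpy as np

def image_load(folder):
    # read every image in numeric filename order and stack them into an array of shape (N, 32, 32, 3)
    files = sorted(os.listdir(folder), key=lambda f: int(os.path.splitext(f)[0]))
    return np.array([cv2.imread(os.path.join(folder, f)) for f in files])

def label_load(csv_path):
    # read one integer label per line and one-hot encode it into shape (N, 2)
    labels = np.loadtxt(csv_path, dtype=int)
    return np.eye(2)[labels]

def next_batch(images, labels, start, batch_size):
    # return the next contiguous mini-batch
    end = start + batch_size
    return images[start:end], labels[start:end]

def shuffle_batch(images, labels):
    # shuffle the images and labels with the same random permutation
    idx = np.random.permutation(len(images))
    return images[idx], labels[idx]
###Output
_____no_output_____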
###Code
import loader_leaf as ll
train_image = 'd:/data/leafs/images/train_resize/'
test_image = 'd:/data/leafs/images/test_resize/'
train_label = 'd:/data/leafs/images/train_label.csv'
test_label = 'd:/data/leafs/images/test_label.csv'
print(ll.image_load(train_image).shape)
print(ll.image_load(test_image).shape)
print(ll.label_load(train_label).shape)
print(ll.label_load(test_label).shape)
###Output
(19000, 32, 32, 3)
(1000, 32, 32, 3)
(19000, 2)
(1000, 2)
###Markdown
 ■ Environments for building a neural network that classifies the photos: 1. implement with TensorFlow 1.x only; 2. implement with TensorFlow 1.x (1.14.0) + keras (2.3.1) ---> Google acquired Keras; 3. implement with TensorFlow 2.x. ■ The big picture of the neural-network design that classifies the leaf data. ※ Problem 142. Using the code we already have, implement the Keras neural network shown in the figure below and load the leaf data into it.
###Code
from keras.datasets import cifar10
from keras.models import Sequential, save_model
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import numpy as np
from keras.utils import np_utils
from keras.layers.normalization import BatchNormalization
import matplotlib.pyplot as plt
import loader_leaf as loader3
import warnings
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
warnings.filterwarnings('ignore')
plt.rcParams['figure.figsize'] = (20, 10)
plt.rcParams.update({'font.size':20})
batch_size = 28
num_classes = 2
epochs = 15
train_image = 'D:\\data\\leafs\\images\\train_resize\\'
test_image = 'D:\\data\\leafs\\images\\test_resize\\'
train_label = 'D:\\data\\leafs\\images\\train_label.csv'
test_label = 'D:\\data\\leafs\\images\\test_label.csv'
x_train = loader3.image_load(train_image)
y_train = loader3.label_load(train_label)
x_test = loader3.image_load(test_image)
y_test = loader3.label_load(test_label)
print(loader3.image_load(train_image).shape)
print(loader3.image_load(test_image).shape)
print(loader3.label_load(train_label).shape)
print(loader3.label_load(test_label).shape)
# (x_train, y_train), (x_test, y_test) = cifar10.load_data()
# One hot Encoding
# y_train = np_utils.to_categorical(y_train)
# y_test = np_utils.to_categorical(y_test)
model = Sequential()
model.add(Conv2D(32, (5, 5), padding='same', input_shape=x_train.shape[1:]))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (5, 5), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
hist = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=epochs, batch_size=batch_size, verbose=2)
scores = model.evaluate(x_test, y_test, verbose=0) # verbose: show the progress of the evaluation
print("CNN Error: %.2f%%" % (100 - scores[1] * 100))
print(hist.history)
'''
{'loss': [0.4628944396972656, 0.3074853718280792, 0.2499154955148697],
'accuracy': [0.7932631373405457, 0.8667894601821899, 0.8957894444465637],
'val_loss': [0.6377381682395935, 0.4691646695137024, 0.4328630864620209],
'val_accuracy': [0.6100000143051147, 0.7739999890327454, 0.8379999995231628]}
'''
# Plot the training accuracy and the validation accuracy.
plt.plot(hist.history['accuracy'])
plt.plot(hist.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# Plot the training loss and the validation loss.
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
###Output
(19000, 32, 32, 3)
(1000, 32, 32, 3)
(19000, 2)
(1000, 2)
Train on 19000 samples, validate on 1000 samples
Epoch 1/15
- 149s - loss: 0.6557 - accuracy: 0.6829 - val_loss: 1.3807 - val_accuracy: 0.2730
Epoch 2/15
- 146s - loss: 0.5450 - accuracy: 0.7405 - val_loss: 1.6024 - val_accuracy: 0.4080
Epoch 3/15
- 142s - loss: 0.5213 - accuracy: 0.7615 - val_loss: 1.7125 - val_accuracy: 0.2850
Epoch 4/15
- 141s - loss: 0.5056 - accuracy: 0.7735 - val_loss: 1.5888 - val_accuracy: 0.2390
Epoch 5/15
- 147s - loss: 0.4909 - accuracy: 0.7856 - val_loss: 1.9389 - val_accuracy: 0.2410
Epoch 6/15
- 150s - loss: 0.4790 - accuracy: 0.7962 - val_loss: 1.6832 - val_accuracy: 0.2100
Epoch 7/15
- 169s - loss: 0.4707 - accuracy: 0.8010 - val_loss: 1.7175 - val_accuracy: 0.1850
Epoch 8/15
- 154s - loss: 0.4602 - accuracy: 0.8114 - val_loss: 1.7233 - val_accuracy: 0.1970
Epoch 9/15
- 148s - loss: 0.4509 - accuracy: 0.8146 - val_loss: 1.6784 - val_accuracy: 0.1590
Epoch 10/15
- 149s - loss: 0.4377 - accuracy: 0.8251 - val_loss: 1.7354 - val_accuracy: 0.1800
Epoch 11/15
- 148s - loss: 0.4357 - accuracy: 0.8263 - val_loss: 1.6926 - val_accuracy: 0.2200
Epoch 12/15
- 143s - loss: 0.4254 - accuracy: 0.8315 - val_loss: 1.8305 - val_accuracy: 0.1570
Epoch 13/15
- 142s - loss: 0.4189 - accuracy: 0.8341 - val_loss: 1.6726 - val_accuracy: 0.1540
Epoch 14/15
- 140s - loss: 0.4077 - accuracy: 0.8401 - val_loss: 1.9250 - val_accuracy: 0.1600
Epoch 15/15
- 137s - loss: 0.3973 - accuracy: 0.8465 - val_loss: 1.8647 - val_accuracy: 0.1490
###Markdown
 ■ tensorflow 2.x One of the biggest advantages of TensorFlow 2.x is that it supports eager execution. In TensorFlow 1.x a lot of extra work was required, such as declaring the computation graph, initializing it, and then flowing values through it with a session; being able to use TensorFlow like plain Python through eager execution is the biggest change in the 2.x release. Deep-learning interview question: what is the biggest difference between TensorFlow 1.x and 2.x? With eager execution you can check the result of an operation immediately. Example 1. Operations in eager execution mode (run in the keras_study virtual environment).
###Code
import tensorflow as tf
import numpy as np
a = tf.constant(3)
b = tf.constant(2)
print(tf.add(a,b))
print(tf.subtract(a,b))
print(tf.multiply(a,b).numpy())
print(tf.divide(a,b).numpy())
###Output
_____no_output_____
###Markdown
 Example 2. The @tf.function feature. @tf.function is the TensorFlow feature that builds a graph automatically: code written in plain Python is converted into, and run as, a high-efficiency TensorFlow graph. Because converting to a TensorFlow graph means the computation can run on the GPU, it can bring a large speed benefit.
###Code
import tensorflow as tf
import numpy as np
@tf.function
def square_pos(x):
if x > 0:
x *= x
else:
x *= -1
return x
print(square_pos(tf.constant(2)))
###Output
_____no_output_____
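###Markdown
 The speed benefit of running the same computation as a graph can be checked directly with timeit; a small sketch follows (the matmul workload and the repetition count are illustrative assumptions, not from the original lecture):
###Code
import timeit
import tensorflow as tf

x = tf.random.uniform((200, 200))

def eager_matmul(m):
    # plain eager-mode matrix multiply
    return tf.matmul(m, m)

# the same computation wrapped into a TensorFlow graph
graph_matmul = tf.function(eager_matmul)

print("eager:", timeit.timeit(lambda: eager_matmul(x), number=200))
print("graph:", timeit.timeit(lambda: graph_matmul(x), number=200))
###Output
_____no_output_____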
###Markdown
 ■ Implementing a perceptron with TensorFlow 2.x: implement a single artificial neuron. The four input/target data sets used with the perceptron: 1. AND gate 2. OR gate 3. NAND gate (single layer) ------------------------------- 4. XOR gate (multi layer) ■ AND gate
###Code
import tensorflow as tf
from tensorflow.keras.models import Sequential # build the network model
from tensorflow.keras.layers import Dense # fully connected layer
from tensorflow.keras.optimizers import SGD # gradient descent
from tensorflow.keras.losses import mse # the error (loss) function
# prepare the data
x = np.array([[0,0],[1,0],[0,1],[1,1]])
y = np.array([[0],[0],[0],[1]])
# build the model
model = Sequential()
# implement a single-layer perceptron
model.add(Dense(1, input_shape = (2,), activation = 'linear')) # numbers come in and a number is predicted (linear output)
# prepare (compile) the model
model.compile(optimizer = SGD(), loss = mse, metrics = ['acc']) # metrics: pass the evaluation metrics as a list
# train the model
model.fit(x, y, epochs = 500)
###Output
WARNING:tensorflow:From C:\Users\knitwill\anaconda3\lib\site-packages\tensorflow\python\ops\init_ops.py:1251: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.
Instructions for updating:
Call initializer instance with the dtype argument instead of passing it to the constructor
Epoch 1/500
4/4 [==============================] - 7s 2s/sample - loss: 0.1101 - acc: 0.7500
Epoch 2/500
4/4 [==============================] - 0s 500us/sample - loss: 0.1094 - acc: 0.7500
Epoch 3/500
4/4 [==============================] - 0s 500us/sample - loss: 0.1089 - acc: 0.7500
Epoch 4/500
4/4 [==============================] - 0s 250us/sample - loss: 0.1083 - acc: 0.7500
Epoch 5/500
4/4 [==============================] - 0s 250us/sample - loss: 0.1077 - acc: 0.7500
Epoch 6/500
4/4 [==============================] - 0s 12ms/sample - loss: 0.1072 - acc: 0.7500
Epoch 7/500
4/4 [==============================] - 0s 249us/sample - loss: 0.1067 - acc: 0.7500
Epoch 8/500
4/4 [==============================] - 0s 250us/sample - loss: 0.1062 - acc: 0.7500
Epoch 9/500
4/4 [==============================] - 0s 250us/sample - loss: 0.1057 - acc: 0.7500
Epoch 10/500
4/4 [==============================] - 0s 250us/sample - loss: 0.1052 - acc: 0.7500
Epoch 11/500
4/4 [==============================] - 0s 250us/sample - loss: 0.1048 - acc: 0.7500
Epoch 12/500
4/4 [==============================] - 0s 0s/sample - loss: 0.1043 - acc: 0.7500
Epoch 13/500
4/4 [==============================] - 0s 250us/sample - loss: 0.1039 - acc: 0.7500
Epoch 14/500
4/4 [==============================] - 0s 0s/sample - loss: 0.1035 - acc: 0.7500
Epoch 15/500
4/4 [==============================] - 0s 250us/sample - loss: 0.1030 - acc: 0.7500
Epoch 16/500
4/4 [==============================] - 0s 750us/sample - loss: 0.1026 - acc: 1.0000
Epoch 17/500
4/4 [==============================] - 0s 499us/sample - loss: 0.1023 - acc: 1.0000
Epoch 18/500
4/4 [==============================] - 0s 250us/sample - loss: 0.1019 - acc: 1.0000
Epoch 19/500
4/4 [==============================] - 0s 500us/sample - loss: 0.1015 - acc: 1.0000
Epoch 20/500
4/4 [==============================] - 0s 250us/sample - loss: 0.1011 - acc: 1.0000
Epoch 21/500
4/4 [==============================] - 0s 2ms/sample - loss: 0.1008 - acc: 1.0000
Epoch 22/500
4/4 [==============================] - 0s 250us/sample - loss: 0.1004 - acc: 1.0000
Epoch 23/500
4/4 [==============================] - 0s 501us/sample - loss: 0.1001 - acc: 1.0000
Epoch 24/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0998 - acc: 1.0000
Epoch 25/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0994 - acc: 1.0000
Epoch 26/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0991 - acc: 1.0000
Epoch 27/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0988 - acc: 1.0000
Epoch 28/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0985 - acc: 1.0000
Epoch 29/500
4/4 [==============================] - 0s 500us/sample - loss: 0.0982 - acc: 1.0000
Epoch 30/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0979 - acc: 1.0000
Epoch 31/500
4/4 [==============================] - 0s 500us/sample - loss: 0.0976 - acc: 1.0000
Epoch 32/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0973 - acc: 1.0000
Epoch 33/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0970 - acc: 1.0000
Epoch 34/500
4/4 [==============================] - 0s 500us/sample - loss: 0.0968 - acc: 1.0000
Epoch 35/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0965 - acc: 1.0000
Epoch 36/500
4/4 [==============================] - 0s 499us/sample - loss: 0.0962 - acc: 1.0000
Epoch 37/500
4/4 [==============================] - 0s 500us/sample - loss: 0.0959 - acc: 1.0000
Epoch 38/500
4/4 [==============================] - 0s 500us/sample - loss: 0.0957 - acc: 1.0000
Epoch 39/500
4/4 [==============================] - 0s 498us/sample - loss: 0.0954 - acc: 1.0000
Epoch 40/500
4/4 [==============================] - 0s 500us/sample - loss: 0.0952 - acc: 1.0000
Epoch 41/500
4/4 [==============================] - 0s 500us/sample - loss: 0.0949 - acc: 1.0000
Epoch 42/500
4/4 [==============================] - 0s 749us/sample - loss: 0.0947 - acc: 1.0000
Epoch 43/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0944 - acc: 1.0000
Epoch 44/500
4/4 [==============================] - 0s 501us/sample - loss: 0.0942 - acc: 1.0000
Epoch 45/500
4/4 [==============================] - 0s 497us/sample - loss: 0.0939 - acc: 1.0000
Epoch 46/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0937 - acc: 1.0000
Epoch 47/500
4/4 [==============================] - 0s 500us/sample - loss: 0.0935 - acc: 1.0000
Epoch 48/500
4/4 [==============================] - 0s 499us/sample - loss: 0.0932 - acc: 1.0000
Epoch 49/500
4/4 [==============================] - 0s 499us/sample - loss: 0.0930 - acc: 1.0000
Epoch 50/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0928 - acc: 1.0000
Epoch 51/500
4/4 [==============================] - 0s 500us/sample - loss: 0.0926 - acc: 1.0000
Epoch 52/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0923 - acc: 1.0000
Epoch 53/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0921 - acc: 1.0000
Epoch 54/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0919 - acc: 1.0000
Epoch 55/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0917 - acc: 1.0000
Epoch 56/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0915 - acc: 1.0000
Epoch 57/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0913 - acc: 1.0000
Epoch 58/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0910 - acc: 1.0000
Epoch 59/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0908 - acc: 1.0000
Epoch 60/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0906 - acc: 1.0000
Epoch 61/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0904 - acc: 1.0000
Epoch 62/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0902 - acc: 1.0000
Epoch 63/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0900 - acc: 1.0000
Epoch 64/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0898 - acc: 1.0000
Epoch 65/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0896 - acc: 1.0000
Epoch 66/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0894 - acc: 1.0000
Epoch 67/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0893 - acc: 1.0000
Epoch 68/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0891 - acc: 1.0000
Epoch 69/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0889 - acc: 1.0000
Epoch 70/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0887 - acc: 1.0000
Epoch 71/500
4/4 [==============================] - 0s 0s/sample - loss: 0.0885 - acc: 1.0000
Epoch 72/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0883 - acc: 1.0000
Epoch 73/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0881 - acc: 1.0000
Epoch 74/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0879 - acc: 1.0000
Epoch 75/500
4/4 [==============================] - 0s 252us/sample - loss: 0.0878 - acc: 1.0000
Epoch 76/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0876 - acc: 1.0000
Epoch 77/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0874 - acc: 1.0000
Epoch 78/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0872 - acc: 1.0000
Epoch 79/500
4/4 [==============================] - 0s 0s/sample - loss: 0.0871 - acc: 1.0000
Epoch 80/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0869 - acc: 1.0000
Epoch 81/500
4/4 [==============================] - 0s 250us/sample - loss: 0.0867 - acc: 1.0000
###Markdown
 ※ Problem 143. Print the weights produced by the neural network that implements the AND-gate perceptron.
###Code
model.get_weights()
###Output
_____no_output_____
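###Markdown
 The learned weights can be plugged back into the perceptron equation y = xW + b to reproduce the model's output by hand; a short sketch (it assumes the AND-gate model and its input x from the cells above are still in memory):
###Code
w, b = model.get_weights()   # w has shape (2, 1), b has shape (1,)
manual = x @ w + b           # the same linear combination the Dense layer computes
print(manual)
print(model.predict(x))      # should match the manual calculation
###Output
_____no_output_____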
###Markdown
 ※ Problem 144. Print the results predicted by the AND-gate perceptron network above.
###Code
model.evaluate(x, y)
result = model.predict(x)
print(result)
###Output
4/4 [==============================] - 3s 698ms/sample - loss: 0.0640 - acc: 1.0000
[[-0.18919873]
[ 0.27109194]
[ 0.24799117]
[ 0.7082819 ]]
###Markdown
 ※ Problem 145. Use a for loop to pull out the individual elements of the prediction result below.
###Code
for i in result:
for j in i:
print(round(j))
###Output
-0.0
0.0
0.0
1.0
###Markdown
 ※ Problem 146. Implement the OR gate.
###Code
import tensorflow as tf
from tensorflow.keras.models import Sequential # build the network model
from tensorflow.keras.layers import Dense # fully connected layer
from tensorflow.keras.optimizers import SGD # gradient descent
from tensorflow.keras.losses import mse # the error (loss) function
# prepare the data
x = np.array([[0,0],[1,0],[0,1],[1,1]])
y = np.array([[0],[1],[1],[1]])
# build the model
model = Sequential()
# implement a single-layer perceptron
model.add(Dense(1, input_shape = (2,), activation = 'linear'))
# prepare (compile) the model
model.compile(optimizer = SGD(), loss = mse, metrics = ['acc'])
# train the model
model.fit(x, y, epochs = 500)
model.evaluate(x, y)
result = model.predict(x)
for i in result:
for j in i:
print(round(j))
###Output
Epoch 1/500
4/4 [==============================] - 0s 95ms/sample - loss: 1.5322 - acc: 0.2500
Epoch 2/500
4/4 [==============================] - 0s 250us/sample - loss: 1.4493 - acc: 0.2500
Epoch 3/500
4/4 [==============================] - 0s 500us/sample - loss: 1.3715 - acc: 0.2500
Epoch 4/500
4/4 [==============================] - 0s 500us/sample - loss: 1.2985 - acc: 0.2500
Epoch 5/500
4/4 [==============================] - 0s 250us/sample - loss: 1.2301 - acc: 0.2500
Epoch 6/500
4/4 [==============================] - 0s 500us/sample - loss: 1.1658 - acc: 0.2500
Epoch 7/500
4/4 [==============================] - 0s 500us/sample - loss: 1.1055 - acc: 0.2500
Epoch 8/500
4/4 [==============================] - 0s 500us/sample - loss: 1.0489 - acc: 0.2500
Epoch 9/500
4/4 [==============================] - 0s 250us/sample - loss: 0.9958 - acc: 0.2500
Epoch 10/500
4/4 [==============================] - 0s 250us/sample - loss: 0.9459 - acc: 0.2500
Epoch 11/500
4/4 [==============================] - 0s 500us/sample - loss: 0.8991 - acc: 0.2500
Epoch 12/500
4/4 [==============================] - 0s 500us/sample - loss: 0.8552 - acc: 0.2500
Epoch 13/500
4/4 [==============================] - 0s 500us/sample - loss: 0.8139 - acc: 0.2500
Epoch 14/500
4/4 [==============================] - 0s 250us/sample - loss: 0.7752 - acc: 0.5000
Epoch 15/500
4/4 [==============================] - 0s 499us/sample - loss: 0.7388 - acc: 0.5000
Epoch 16/500
4/4 [==============================] - 0s 500us/sample - loss: 0.7046 - acc: 0.5000
Epoch 17/500
4/4 [==============================] - 0s 250us/sample - loss: 0.6725 - acc: 0.5000
Epoch 18/500
4/4 [==============================] - 0s 250us/sample - loss: 0.6424 - acc: 0.5000
Epoch 19/500
4/4 [==============================] - 0s 250us/sample - loss: 0.6141 - acc: 0.5000
Epoch 20/500
4/4 [==============================] - 0s 250us/sample - loss: 0.5874 - acc: 0.5000
Epoch 21/500
4/4 [==============================] - 0s 499us/sample - loss: 0.5624 - acc: 0.5000
Epoch 22/500
4/4 [==============================] - 0s 250us/sample - loss: 0.5389 - acc: 0.5000
Epoch 23/500
4/4 [==============================] - 0s 250us/sample - loss: 0.5168 - acc: 0.5000
Epoch 24/500
4/4 [==============================] - 0s 250us/sample - loss: 0.4960 - acc: 0.5000
Epoch 25/500
4/4 [==============================] - 0s 500us/sample - loss: 0.4765 - acc: 0.5000
Epoch 26/500
4/4 [==============================] - 0s 250us/sample - loss: 0.4581 - acc: 0.5000
Epoch 27/500
4/4 [==============================] - 0s 250us/sample - loss: 0.4408 - acc: 0.5000
Epoch 28/500
4/4 [==============================] - 0s 250us/sample - loss: 0.4246 - acc: 0.5000
Epoch 29/500
4/4 [==============================] - 0s 250us/sample - loss: 0.4093 - acc: 0.5000
Epoch 30/500
4/4 [==============================] - 0s 250us/sample - loss: 0.3949 - acc: 0.5000
Epoch 31/500
4/4 [==============================] - 0s 250us/sample - loss: 0.3813 - acc: 0.5000
Epoch 32/500
4/4 [==============================] - 0s 500us/sample - loss: 0.3685 - acc: 0.5000
Epoch 33/500
4/4 [==============================] - 0s 500us/sample - loss: 0.3565 - acc: 0.5000
Epoch 34/500
4/4 [==============================] - 0s 250us/sample - loss: 0.3452 - acc: 0.5000
Epoch 35/500
4/4 [==============================] - 0s 500us/sample - loss: 0.3345 - acc: 0.5000
Epoch 36/500
4/4 [==============================] - 0s 500us/sample - loss: 0.3244 - acc: 0.5000
Epoch 37/500
4/4 [==============================] - 0s 500us/sample - loss: 0.3149 - acc: 0.7500
Epoch 38/500
4/4 [==============================] - 0s 500us/sample - loss: 0.3060 - acc: 0.7500
Epoch 39/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2975 - acc: 0.7500
Epoch 40/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2896 - acc: 0.7500
Epoch 41/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2820 - acc: 0.7500
Epoch 42/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2749 - acc: 0.7500
Epoch 43/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2682 - acc: 0.7500
Epoch 44/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2618 - acc: 0.7500
Epoch 45/500
4/4 [==============================] - 0s 750us/sample - loss: 0.2558 - acc: 0.7500
Epoch 46/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2501 - acc: 0.7500
Epoch 47/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2448 - acc: 0.7500
Epoch 48/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2397 - acc: 0.7500
Epoch 49/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2348 - acc: 0.7500
Epoch 50/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2303 - acc: 0.7500
Epoch 51/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2259 - acc: 0.7500
Epoch 52/500
4/4 [==============================] - 0s 749us/sample - loss: 0.2218 - acc: 0.7500
Epoch 53/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2179 - acc: 0.7500
Epoch 54/500
4/4 [==============================] - 0s 750us/sample - loss: 0.2142 - acc: 0.7500
Epoch 55/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2106 - acc: 0.5000
Epoch 56/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2073 - acc: 0.5000
Epoch 57/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2041 - acc: 0.5000
Epoch 58/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2010 - acc: 0.5000
Epoch 59/500
4/4 [==============================] - 0s 250us/sample - loss: 0.1982 - acc: 0.5000
Epoch 60/500
4/4 [==============================] - 0s 250us/sample - loss: 0.1954 - acc: 0.5000
Epoch 61/500
4/4 [==============================] - 0s 500us/sample - loss: 0.1927 - acc: 0.5000
Epoch 62/500
4/4 [==============================] - 0s 500us/sample - loss: 0.1902 - acc: 0.5000
Epoch 63/500
4/4 [==============================] - 0s 500us/sample - loss: 0.1878 - acc: 0.5000
Epoch 64/500
4/4 [==============================] - 0s 499us/sample - loss: 0.1855 - acc: 0.5000
Epoch 65/500
4/4 [==============================] - 0s 499us/sample - loss: 0.1833 - acc: 0.5000
Epoch 66/500
4/4 [==============================] - 0s 500us/sample - loss: 0.1812 - acc: 0.5000
Epoch 67/500
4/4 [==============================] - 0s 750us/sample - loss: 0.1792 - acc: 0.5000
Epoch 68/500
4/4 [==============================] - 0s 500us/sample - loss: 0.1773 - acc: 0.5000
Epoch 69/500
4/4 [==============================] - 0s 749us/sample - loss: 0.1754 - acc: 0.5000
Epoch 70/500
4/4 [==============================] - 0s 749us/sample - loss: 0.1736 - acc: 0.5000
Epoch 71/500
4/4 [==============================] - 0s 750us/sample - loss: 0.1719 - acc: 0.5000
Epoch 72/500
4/4 [==============================] - 0s 750us/sample - loss: 0.1703 - acc: 0.5000
Epoch 73/500
4/4 [==============================] - 0s 500us/sample - loss: 0.1687 - acc: 0.5000
Epoch 74/500
4/4 [==============================] - 0s 499us/sample - loss: 0.1671 - acc: 0.5000
Epoch 75/500
4/4 [==============================] - 0s 750us/sample - loss: 0.1657 - acc: 0.5000
Epoch 76/500
4/4 [==============================] - 0s 500us/sample - loss: 0.1642 - acc: 0.5000
Epoch 77/500
4/4 [==============================] - 0s 500us/sample - loss: 0.1629 - acc: 0.5000
Epoch 78/500
4/4 [==============================] - 0s 499us/sample - loss: 0.1615 - acc: 0.5000
Epoch 79/500
4/4 [==============================] - 0s 500us/sample - loss: 0.1602 - acc: 0.5000
Epoch 80/500
4/4 [==============================] - 0s 500us/sample - loss: 0.1590 - acc: 0.5000
Epoch 81/500
4/4 [==============================] - 0s 500us/sample - loss: 0.1578 - acc: 0.5000
Epoch 82/500
4/4 [==============================] - 0s 500us/sample - loss: 0.1566 - acc: 0.5000
Epoch 83/500
4/4 [==============================] - 0s 500us/sample - loss: 0.1555 - acc: 0.5000
Epoch 84/500
4/4 [==============================] - 0s 749us/sample - loss: 0.1544 - acc: 0.5000
Epoch 85/500
4/4 [==============================] - 0s 750us/sample - loss: 0.1533 - acc: 0.5000
###Markdown
 ※ Problem 147. Implement the NAND (Not AND) gate.
###Code
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.losses import mse
# prepare the data
x = np.array([[0,0],[1,0],[0,1],[1,1]])
y = np.array([[1],[1],[1],[0]])
# build the model
model = Sequential()
# implement a single-layer perceptron
model.add(Dense(1, input_shape = (2,), activation = 'linear'))
# prepare (compile) the model
model.compile(optimizer = SGD(), loss = mse, metrics = ['acc'])
# train the model
model.fit(x, y, epochs = 500)
model.evaluate(x, y)
result = model.predict(x)
for i in result:
for j in i:
print(round(j))
###Output
Epoch 1/500
4/4 [==============================] - 0s 26ms/sample - loss: 1.8562 - acc: 0.2500
Epoch 2/500
4/4 [==============================] - 0s 250us/sample - loss: 1.7615 - acc: 0.2500
Epoch 3/500
4/4 [==============================] - 0s 500us/sample - loss: 1.6726 - acc: 0.2500
Epoch 4/500
4/4 [==============================] - 0s 250us/sample - loss: 1.5892 - acc: 0.2500
Epoch 5/500
4/4 [==============================] - 0s 250us/sample - loss: 1.5108 - acc: 0.2500
Epoch 6/500
4/4 [==============================] - 0s 500us/sample - loss: 1.4372 - acc: 0.2500
Epoch 7/500
4/4 [==============================] - 0s 750us/sample - loss: 1.3680 - acc: 0.2500
Epoch 8/500
4/4 [==============================] - 0s 500us/sample - loss: 1.3030 - acc: 0.2500
Epoch 9/500
4/4 [==============================] - 0s 500us/sample - loss: 1.2420 - acc: 0.2500
Epoch 10/500
4/4 [==============================] - 0s 749us/sample - loss: 1.1846 - acc: 0.2500
Epoch 11/500
4/4 [==============================] - 0s 500us/sample - loss: 1.1307 - acc: 0.2500
Epoch 12/500
4/4 [==============================] - 0s 749us/sample - loss: 1.0800 - acc: 0.2500
Epoch 13/500
4/4 [==============================] - 0s 500us/sample - loss: 1.0324 - acc: 0.5000
Epoch 14/500
4/4 [==============================] - 0s 499us/sample - loss: 0.9876 - acc: 0.5000
Epoch 15/500
4/4 [==============================] - 0s 749us/sample - loss: 0.9455 - acc: 0.5000
Epoch 16/500
4/4 [==============================] - 0s 499us/sample - loss: 0.9058 - acc: 0.5000
Epoch 17/500
4/4 [==============================] - 0s 500us/sample - loss: 0.8686 - acc: 0.5000
Epoch 18/500
4/4 [==============================] - 0s 250us/sample - loss: 0.8335 - acc: 0.5000
Epoch 19/500
4/4 [==============================] - 0s 750us/sample - loss: 0.8005 - acc: 0.5000
Epoch 20/500
4/4 [==============================] - 0s 250us/sample - loss: 0.7694 - acc: 0.5000
Epoch 21/500
4/4 [==============================] - 0s 250us/sample - loss: 0.7401 - acc: 0.5000
Epoch 22/500
4/4 [==============================] - 0s 250us/sample - loss: 0.7126 - acc: 0.5000
Epoch 23/500
4/4 [==============================] - 0s 500us/sample - loss: 0.6866 - acc: 0.5000
Epoch 24/500
4/4 [==============================] - 0s 250us/sample - loss: 0.6622 - acc: 0.5000
Epoch 25/500
4/4 [==============================] - 0s 250us/sample - loss: 0.6391 - acc: 0.5000
Epoch 26/500
4/4 [==============================] - 0s 500us/sample - loss: 0.6174 - acc: 0.5000
Epoch 27/500
4/4 [==============================] - 0s 500us/sample - loss: 0.5969 - acc: 0.5000
Epoch 28/500
4/4 [==============================] - 0s 750us/sample - loss: 0.5775 - acc: 0.5000
Epoch 29/500
4/4 [==============================] - 0s 500us/sample - loss: 0.5593 - acc: 0.5000
Epoch 30/500
4/4 [==============================] - 0s 500us/sample - loss: 0.5421 - acc: 0.5000
Epoch 31/500
4/4 [==============================] - 0s 500us/sample - loss: 0.5258 - acc: 0.7500
Epoch 32/500
4/4 [==============================] - 0s 500us/sample - loss: 0.5104 - acc: 0.7500
Epoch 33/500
4/4 [==============================] - 0s 250us/sample - loss: 0.4959 - acc: 0.7500
Epoch 34/500
4/4 [==============================] - 0s 500us/sample - loss: 0.4822 - acc: 0.7500
Epoch 35/500
4/4 [==============================] - 0s 500us/sample - loss: 0.4692 - acc: 0.7500
Epoch 36/500
4/4 [==============================] - 0s 500us/sample - loss: 0.4569 - acc: 0.7500
Epoch 37/500
4/4 [==============================] - 0s 250us/sample - loss: 0.4453 - acc: 0.7500
Epoch 38/500
4/4 [==============================] - 0s 500us/sample - loss: 0.4343 - acc: 0.7500
Epoch 39/500
4/4 [==============================] - 0s 250us/sample - loss: 0.4238 - acc: 0.7500
Epoch 40/500
4/4 [==============================] - 0s 500us/sample - loss: 0.4139 - acc: 0.7500
Epoch 41/500
4/4 [==============================] - 0s 500us/sample - loss: 0.4046 - acc: 0.7500
Epoch 42/500
4/4 [==============================] - 0s 500us/sample - loss: 0.3956 - acc: 0.7500
Epoch 43/500
4/4 [==============================] - 0s 500us/sample - loss: 0.3872 - acc: 0.7500
Epoch 44/500
4/4 [==============================] - 0s 250us/sample - loss: 0.3792 - acc: 0.7500
Epoch 45/500
4/4 [==============================] - 0s 500us/sample - loss: 0.3715 - acc: 0.7500
Epoch 46/500
4/4 [==============================] - 0s 250us/sample - loss: 0.3643 - acc: 0.7500
Epoch 47/500
4/4 [==============================] - 0s 500us/sample - loss: 0.3573 - acc: 0.7500
Epoch 48/500
4/4 [==============================] - 0s 250us/sample - loss: 0.3508 - acc: 0.7500
Epoch 49/500
4/4 [==============================] - 0s 750us/sample - loss: 0.3445 - acc: 0.7500
Epoch 50/500
4/4 [==============================] - 0s 500us/sample - loss: 0.3385 - acc: 0.7500
Epoch 51/500
4/4 [==============================] - 0s 500us/sample - loss: 0.3328 - acc: 0.7500
Epoch 52/500
4/4 [==============================] - 0s 750us/sample - loss: 0.3273 - acc: 0.7500
Epoch 53/500
4/4 [==============================] - 0s 500us/sample - loss: 0.3221 - acc: 0.7500
Epoch 54/500
4/4 [==============================] - 0s 500us/sample - loss: 0.3172 - acc: 0.7500
Epoch 55/500
4/4 [==============================] - 0s 749us/sample - loss: 0.3124 - acc: 0.7500
Epoch 56/500
4/4 [==============================] - 0s 500us/sample - loss: 0.3078 - acc: 0.7500
Epoch 57/500
4/4 [==============================] - 0s 250us/sample - loss: 0.3035 - acc: 0.7500
Epoch 58/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2993 - acc: 0.7500
Epoch 59/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2952 - acc: 0.7500
Epoch 60/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2914 - acc: 0.7500
Epoch 61/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2877 - acc: 0.7500
Epoch 62/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2841 - acc: 0.7500
Epoch 63/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2807 - acc: 0.7500
Epoch 64/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2773 - acc: 0.7500
Epoch 65/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2741 - acc: 0.7500
Epoch 66/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2711 - acc: 0.7500
Epoch 67/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2681 - acc: 0.7500
Epoch 68/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2652 - acc: 0.7500
Epoch 69/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2624 - acc: 0.7500
Epoch 70/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2597 - acc: 0.7500
Epoch 71/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2571 - acc: 0.7500
Epoch 72/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2546 - acc: 0.7500
Epoch 73/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2521 - acc: 0.7500
Epoch 74/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2498 - acc: 0.7500
Epoch 75/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2474 - acc: 0.7500
Epoch 76/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2452 - acc: 0.7500
Epoch 77/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2430 - acc: 0.7500
Epoch 78/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2409 - acc: 0.7500
Epoch 79/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2388 - acc: 0.7500
Epoch 80/500
4/4 [==============================] - 0s 249us/sample - loss: 0.2368 - acc: 0.7500
Epoch 81/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2348 - acc: 0.7500
Epoch 82/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2329 - acc: 0.7500
Epoch 83/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2310 - acc: 0.7500
Epoch 84/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2292 - acc: 0.7500
Epoch 85/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2274 - acc: 0.7500
###Markdown
 ※ Problem 148. Implement the XOR gate.
###Code
import tensorflow as tf
from tensorflow.keras.models import Sequential # build the network model
from tensorflow.keras.layers import Dense # fully connected layer
from tensorflow.keras.optimizers import SGD, RMSprop # gradient-descent optimizers
from tensorflow.keras.losses import mse # the error (loss) function
x = np.array([[0,0],[1,0],[0,1],[1,1]])
y = np.array([[0],[1],[1],[0]])
model = Sequential()
model.add(Dense(32, input_shape = (2,), activation = 'relu'))
# the first layer of the model must be given the shape of the input data (input_shape)
model.add(Dense(1, activation = 'sigmoid'))
# mean squared error, as in a regression problem
model.compile(optimizer = RMSprop(), loss = mse, metrics = ['acc'])
# once the model is built with model.add, call compile to configure the training process
# metrics = ['acc'] : set so the training process can be monitored
"""
Binary classification problems (two classes, such as the leaves or dogs/cats):
model.compile(optimizer = RMSprop(), loss = binary_crossentropy, metrics = ['acc'])
Multi-class classification problems:
model.compile(optimizer = RMSprop(), loss = categorical_crossentropy, metrics = ['acc'])
Optimizer types : SGD, RMSprop, Adam, NAdam, etc.
Loss (error) function types : mse, binary_crossentropy, categorical_crossentropy
"""
model.fit(x, y, epochs = 500)
# model.fit(data, label, epochs= , validation_data=(val_data, val_label) )
"""
validation_data : validation data, used to monitor the model's performance; it is built from a portion of the training data
and is used as an evaluation indicator of how well the training is going
"""
model.evaluate(x, y)
"""
Using the evaluate function you can check the loss and the evaluation metrics.
Example result: [[-0.18919873]
[ 0.27109194]
[ 0.24799117]
[ 0.7082819 ]]
"""
result = model.predict(x)
for i in result:
for j in i:
print(round(j))
###Output
Epoch 1/500
4/4 [==============================] - 0s 65ms/sample - loss: 0.2540 - acc: 0.7500
Epoch 2/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2518 - acc: 0.5000
Epoch 3/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2503 - acc: 0.5000
Epoch 4/500
4/4 [==============================] - 0s 2ms/sample - loss: 0.2493 - acc: 0.5000
Epoch 5/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2484 - acc: 0.5000
Epoch 6/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2476 - acc: 0.5000
Epoch 7/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2468 - acc: 0.5000
Epoch 8/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2461 - acc: 0.5000
Epoch 9/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2455 - acc: 0.5000
Epoch 10/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2450 - acc: 0.5000
Epoch 11/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2445 - acc: 0.5000
Epoch 12/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2440 - acc: 0.5000
Epoch 13/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2435 - acc: 0.5000
Epoch 14/500
4/4 [==============================] - 0s 750us/sample - loss: 0.2431 - acc: 0.5000
Epoch 15/500
4/4 [==============================] - 0s 4ms/sample - loss: 0.2426 - acc: 0.5000
Epoch 16/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2422 - acc: 0.5000
Epoch 17/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2418 - acc: 0.5000
Epoch 18/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2414 - acc: 0.5000
Epoch 19/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2410 - acc: 0.5000
Epoch 20/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2406 - acc: 0.5000
Epoch 21/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2402 - acc: 0.5000
Epoch 22/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2398 - acc: 0.5000
Epoch 23/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2394 - acc: 0.5000
Epoch 24/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2391 - acc: 0.5000
Epoch 25/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2387 - acc: 0.5000
Epoch 26/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2383 - acc: 0.5000
Epoch 27/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2379 - acc: 0.5000
Epoch 28/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2376 - acc: 0.5000
Epoch 29/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2372 - acc: 0.5000
Epoch 30/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2368 - acc: 0.5000
Epoch 31/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2365 - acc: 0.5000
Epoch 32/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2361 - acc: 0.5000
Epoch 33/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2358 - acc: 0.5000
Epoch 34/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2354 - acc: 0.5000
Epoch 35/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2350 - acc: 0.5000
Epoch 36/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2347 - acc: 0.5000
Epoch 37/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2343 - acc: 0.5000
Epoch 38/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2340 - acc: 0.5000
Epoch 39/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2336 - acc: 0.5000
Epoch 40/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2332 - acc: 0.5000
Epoch 41/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2329 - acc: 0.5000
Epoch 42/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2325 - acc: 0.5000
Epoch 43/500
4/4 [==============================] - 0s 749us/sample - loss: 0.2321 - acc: 0.5000
Epoch 44/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2318 - acc: 0.5000
Epoch 45/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2314 - acc: 0.5000
Epoch 46/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2310 - acc: 0.5000
Epoch 47/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2307 - acc: 0.5000
Epoch 48/500
4/4 [==============================] - 0s 249us/sample - loss: 0.2303 - acc: 0.5000
Epoch 49/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2299 - acc: 0.5000
Epoch 50/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2296 - acc: 0.5000
Epoch 51/500
4/4 [==============================] - 0s 498us/sample - loss: 0.2292 - acc: 0.5000
Epoch 52/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2288 - acc: 0.5000
Epoch 53/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2284 - acc: 0.5000
Epoch 54/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2281 - acc: 0.5000
Epoch 55/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2277 - acc: 0.5000
Epoch 56/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2273 - acc: 0.5000
Epoch 57/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2270 - acc: 0.5000
Epoch 58/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2266 - acc: 0.5000
Epoch 59/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2262 - acc: 0.7500
Epoch 60/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2258 - acc: 0.7500
Epoch 61/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2254 - acc: 0.7500
Epoch 62/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2251 - acc: 1.0000
Epoch 63/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2247 - acc: 1.0000
Epoch 64/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2243 - acc: 1.0000
Epoch 65/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2239 - acc: 1.0000
Epoch 66/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2235 - acc: 1.0000
Epoch 67/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2231 - acc: 1.0000
Epoch 68/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2227 - acc: 1.0000
Epoch 69/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2224 - acc: 1.0000
Epoch 70/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2219 - acc: 1.0000
Epoch 71/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2216 - acc: 1.0000
Epoch 72/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2212 - acc: 1.0000
Epoch 73/500
4/4 [==============================] - 0s 499us/sample - loss: 0.2208 - acc: 1.0000
Epoch 74/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2204 - acc: 1.0000
Epoch 75/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2200 - acc: 1.0000
Epoch 76/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2197 - acc: 1.0000
Epoch 77/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2192 - acc: 1.0000
Epoch 78/500
4/4 [==============================] - 0s 498us/sample - loss: 0.2188 - acc: 1.0000
Epoch 79/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2184 - acc: 1.0000
Epoch 80/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2181 - acc: 1.0000
Epoch 81/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2176 - acc: 1.0000
Epoch 82/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2173 - acc: 1.0000
Epoch 83/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2168 - acc: 1.0000
Epoch 84/500
4/4 [==============================] - 0s 500us/sample - loss: 0.2165 - acc: 1.0000
Epoch 85/500
4/4 [==============================] - 0s 250us/sample - loss: 0.2160 - acc: 1.0000
###Markdown
Implementing a neural network that trains on the MNIST data with TensorFlow 2.x 1. Downloading the MNIST dataset
###Code
from tensorflow.keras.datasets.mnist import load_data
(x_train, y_train), (x_test, y_test) = load_data(path='mnist.npz')
print(x_train.shape, y_train.shape)
print(y_train)
print(x_test.shape, y_test.shape)
print(y_test)
###Output
(60000, 28, 28) (60000,)
[5 0 4 ... 5 6 8]
(10000, 28, 28) (10000,)
[7 2 1 ... 4 5 6]
###Markdown
2. Plotting the data
###Code
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams['figure.figsize']=(5,5)
plt.rcParams.update({'font.size':13})
sample_size = 3
random_idx = np.random.randint(60000, size = sample_size)
for idx in random_idx:
img = x_train[idx, :]
label = y_train[idx]
plt.figure()
plt.imshow(img)
###Output
_____no_output_____
###Markdown
3. Creating the validation data
###Code
from sklearn.model_selection import train_test_split
# Split the data into training and validation sets in a 7:3 ratio
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size = 0.3, random_state = 777)
print(x_train.shape)
print(x_val.shape)
###Output
(42000, 28, 28)
(18000, 28, 28)
###Markdown
4. Data preprocessing (normalization) before feeding it into the model. Interview question: why is normalization or scaling needed? Neural networks are very sensitive to the scale of their input data, so appropriate preprocessing is required. Since each pixel of a digit image lies in the range 0 to 255, dividing each pixel by 255 scales it to a number between 0 and 1.
###Code
num_x_train = x_train.shape[0]
num_x_val = x_val.shape[0]
num_x_test = x_test.shape[0]
x_train = (x_train.reshape((num_x_train, 28*28)))/255
x_val = (x_val.reshape((num_x_val, 28*28)))/255
x_test = (x_test.reshape((num_x_test, 28*28)))/255
print(x_train.shape) # The data is reshaped to 2-D for model input
print(x_val.shape)
print(x_test.shape)
###Output
(42000, 784)
(18000, 784)
(10000, 784)
###Markdown
5. Label preprocessing for model input
###Code
from tensorflow.keras.utils import to_categorical
# One-hot encode the MNIST label digits
y_train = to_categorical(y_train)
y_val = to_categorical(y_val)
"""
label_load in loader2.py already performs one-hot encoding, so this code is not used in that data-classification network
"""
print(y_val)
###Output
[[0. 0. 0. ... 1. 0. 0.]
[0. 0. 0. ... 0. 1. 0.]
[0. 0. 0. ... 0. 0. 0.]
...
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 1. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]]
###Markdown
6. Building the model
###Code
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential()
model.add(Dense(64, input_shape=(784,), activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(10, activation='softmax'))
###Output
_____no_output_____
###Markdown
7. Configuring the model's training process (compile)
###Code
from tensorflow.keras.losses import categorical_crossentropy
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
###Output
_____no_output_____
###Markdown
8. Training the model
###Code
history = model.fit(x_train, y_train, epochs=30, batch_size=128, validation_data=(x_val, y_val))
###Output
Train on 42000 samples, validate on 18000 samples
Epoch 1/30
42000/42000 [==============================] - 3s 74us/sample - loss: 0.4763 - acc: 0.8654 - val_loss: 0.2444 - val_acc: 0.9291
Epoch 2/30
42000/42000 [==============================] - 1s 21us/sample - loss: 0.2074 - acc: 0.9398 - val_loss: 0.1923 - val_acc: 0.9446
Epoch 3/30
42000/42000 [==============================] - 2s 37us/sample - loss: 0.1560 - acc: 0.9540 - val_loss: 0.1617 - val_acc: 0.9529
Epoch 4/30
42000/42000 [==============================] - 1s 22us/sample - loss: 0.1273 - acc: 0.9628 - val_loss: 0.1402 - val_acc: 0.9579
Epoch 5/30
42000/42000 [==============================] - 1s 21us/sample - loss: 0.1075 - acc: 0.9688 - val_loss: 0.1287 - val_acc: 0.9614
Epoch 6/30
42000/42000 [==============================] - 1s 23us/sample - loss: 0.0911 - acc: 0.9739 - val_loss: 0.1249 - val_acc: 0.9624
Epoch 7/30
42000/42000 [==============================] - 1s 22us/sample - loss: 0.0796 - acc: 0.9768 - val_loss: 0.1154 - val_acc: 0.9658
Epoch 8/30
42000/42000 [==============================] - 1s 21us/sample - loss: 0.0693 - acc: 0.9796 - val_loss: 0.1160 - val_acc: 0.9664
Epoch 9/30
42000/42000 [==============================] - 1s 24us/sample - loss: 0.0609 - acc: 0.9821 - val_loss: 0.1115 - val_acc: 0.9669
Epoch 10/30
42000/42000 [==============================] - 1s 21us/sample - loss: 0.0521 - acc: 0.9846 - val_loss: 0.1067 - val_acc: 0.9691
Epoch 11/30
42000/42000 [==============================] - 1s 20us/sample - loss: 0.0463 - acc: 0.9856 - val_loss: 0.1035 - val_acc: 0.9700
Epoch 12/30
42000/42000 [==============================] - 1s 22us/sample - loss: 0.0413 - acc: 0.9875 - val_loss: 0.1111 - val_acc: 0.9671
Epoch 13/30
42000/42000 [==============================] - 1s 22us/sample - loss: 0.0369 - acc: 0.9887 - val_loss: 0.1084 - val_acc: 0.9693
Epoch 14/30
42000/42000 [==============================] - 1s 21us/sample - loss: 0.0312 - acc: 0.9912 - val_loss: 0.1087 - val_acc: 0.9693
Epoch 15/30
42000/42000 [==============================] - 1s 24us/sample - loss: 0.0295 - acc: 0.9908 - val_loss: 0.1108 - val_acc: 0.9699
Epoch 16/30
42000/42000 [==============================] - 1s 22us/sample - loss: 0.0263 - acc: 0.9925 - val_loss: 0.1110 - val_acc: 0.9693
Epoch 17/30
42000/42000 [==============================] - 1s 24us/sample - loss: 0.0221 - acc: 0.9939 - val_loss: 0.1082 - val_acc: 0.9698
Epoch 18/30
42000/42000 [==============================] - 1s 21us/sample - loss: 0.0211 - acc: 0.9936 - val_loss: 0.1149 - val_acc: 0.9698
Epoch 19/30
42000/42000 [==============================] - 1s 21us/sample - loss: 0.0202 - acc: 0.9937 - val_loss: 0.1185 - val_acc: 0.9687
Epoch 20/30
42000/42000 [==============================] - 1s 23us/sample - loss: 0.0159 - acc: 0.9957 - val_loss: 0.1181 - val_acc: 0.9689
Epoch 21/30
42000/42000 [==============================] - 1s 22us/sample - loss: 0.0137 - acc: 0.9961 - val_loss: 0.1160 - val_acc: 0.9705
Epoch 22/30
42000/42000 [==============================] - 1s 21us/sample - loss: 0.0120 - acc: 0.9971 - val_loss: 0.1296 - val_acc: 0.9687
Epoch 23/30
42000/42000 [==============================] - 1s 22us/sample - loss: 0.0140 - acc: 0.9960 - val_loss: 0.1355 - val_acc: 0.9672
Epoch 24/30
42000/42000 [==============================] - 1s 23us/sample - loss: 0.0089 - acc: 0.9980 - val_loss: 0.1308 - val_acc: 0.9696
Epoch 25/30
42000/42000 [==============================] - 1s 23us/sample - loss: 0.0083 - acc: 0.9982 - val_loss: 0.1366 - val_acc: 0.9692
Epoch 26/30
42000/42000 [==============================] - 1s 25us/sample - loss: 0.0077 - acc: 0.9985 - val_loss: 0.1384 - val_acc: 0.9700
Epoch 27/30
42000/42000 [==============================] - 1s 22us/sample - loss: 0.0150 - acc: 0.9950 - val_loss: 0.1409 - val_acc: 0.9689
Epoch 28/30
42000/42000 [==============================] - 1s 22us/sample - loss: 0.0095 - acc: 0.9972 - val_loss: 0.1508 - val_acc: 0.9676
Epoch 29/30
42000/42000 [==============================] - 1s 23us/sample - loss: 0.0062 - acc: 0.9987 - val_loss: 0.1327 - val_acc: 0.9721
Epoch 30/30
42000/42000 [==============================] - 1s 24us/sample - loss: 0.0043 - acc: 0.9991 - val_loss: 0.1534 - val_acc: 0.9679
###Markdown
9. Printing the values that can be inspected through history
###Code
history.history.keys()
###Output
_____no_output_____
###Markdown
10. Visualization code
###Code
plt.rcParams['figure.figsize']=(20,10)
plt.rcParams.update({'font.size':15})
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
###Output
_____no_output_____ |
09_fv_nonlinear.ipynb | ###Markdown
Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license. (c) Kyle T. Mandli
###Code
from __future__ import print_function
%matplotlib inline
import numpy
import matplotlib.pyplot as plt
import matplotlib.animation
from IPython.display import HTML
from clawpack import pyclaw
from clawpack import riemann
###Output
_____no_output_____
###Markdown
Finite Volume Methods for Nonlinear Systems Godunov's MethodRecall that our goal is to evolve the cell-averages$$ Q^n_i \approx \frac{1}{\Delta x} \int^{x_{i+1/2}}_{x_{i-1/2}} q(x, t_n) dx$$using a piecewise reconstruction $\widetilde{q}^n(x, t_n)$ using these cell-averages and evolving these functions using the conservation law.Solving (evolving) over time $\Delta t$ with this data gives us the function $\widetilde{q}^n(x, t_{n+1})$ leading to$$ Q^{n+1}_i = \frac{1}{\Delta x} \int^{x_{i+1/2}}_{x_{i-1/2}} \widetilde{q}^n(x, t_{n+1}) dx$$. The final component of Godunov's method suggests that we do not need the entire Riemann solution but only need the solution along the cell-interface $x_{i-1/2}$ such that$$ Q^{n+1}_i = Q^n_i - \frac{\Delta t}{\Delta x} (F^n_{i+1/2} - F^n_{i-1/2})$$where$$ F^n_{i-1/2} = \mathcal{F}(Q^n_{i-1}, Q^n_i) = f(\widehat{q}(Q^n_{i-1}, Q^n_i)$$where $\widehat{q}(Q^n_{i-1}, Q^n_i)$ is the Riemann solution evaluated along $x/t = 0$. Godunov's method can also be implemented in wave-propagation form$$ Q^{n+1}_i = Q^n_i - \frac{\Delta t}{\Delta x} (\mathcal{A}^+ \Delta Q_{i-1/2} + \mathcal{A}^- \Delta Q_{i+1/2}),$$which takes the fluxes to be$$\begin{aligned} \mathcal{A}^- \Delta Q_{i-1/2} &= f(\widehat{Q}_{i-1/2}) - f(Q_{i-1}) \\ \mathcal{A}^+ \Delta Q_{i-1/2} &= f(Q_{i}) - f(\widehat{Q}_{i-1/2}).\end{aligned}$$ The primary note of importance now is that all we need for Godunov's method is what the solution along the grid cell edge is rather than the full Riemann solution we have been working with. This strongly suggests that there may be ways to use approximate Riemann solvers that give us only what we need and are less expensive. Convergence of Godunov's MethodIt also useful at this point to review what we know about the convergence of Godunov's method that we showed before. 1. The Lax-Wendroff theorem implies that for nonlinear systems of conservation laws, if we have a sequence of numerical approximations representing grid refinement that this sequence will converge in the appropriate sense to a function $q(x,t)$ and that this functions is a weak solution of the conservation law. This is unfortunately not true in general for the nonconservative form of the equations. 2. Entropy conditions will allow us to pick out the correct weak solution and if we employ Riemann solvers that obey the appropriate Entropy conditions that the overall method will also pick out the entropy satisfying solution. 3. The Lax-Wendroff theorem unfortunately does not guarantee convergence, rather it only says *if* a sequence converges that it converges to a weak solution of the conservation law. Showing convergence requires a form of stability for which we used TV-stability before. Unfortunately TV-stability cannot be extended as is to the system case. Approximate Riemann SolversWe now will start to discuss the idea that perhaps we only need a small part of the full Riemann solution if we are interested in using Godunov's methods. In particular, if $\widehat{q}(q_\ell, q_r)$ is the general, full solution to a Riemann problem that we only need to know the state along $x/t = 0$. This usually implies that we need to compute one of the middle states $q_m$ of the Riemann solution although this is highly dependent on wave speeds and criticality conditions. Define a function$$ \widehat{Q}_{i-1/2}(x/t)$$that approximates the true similarity solution of the Riemann problem with input data $Q_{i-1}$ and $Q_i$. 
This approximation will generally depend on some set of jumps in $Q$ where$$ Q_i - Q_{i-1} = \sum^{M_w}_{p=1} \mathcal{W}^p_{i-1/2}$$where now we are allowed to pick out how many waves $M_w$ represent the approximation. Generalizing Godunov's method to systems then we could take two different approaches to defining the fluctuations:1. Define the numerical flux by$$ F_{i-1/2} = f(\widehat{Q}_{i-1/2})$$where$$ \widehat{Q}_{i-1/2} = Q_{i-1} + \sum_{p:s^p_{i-1/2} < 0} \mathcal{W}^p_{i-1/2}.$$In other words the state that lies along $x/t = 0$. We can also go the other direction so that$$ \widehat{Q}_{i-1/2} = Q_{i} - \sum_{p:s^p_{i-1/2} > 0} \mathcal{W}^p_{i-1/2}.$$Therefore the fluctuations are $$\begin{aligned} \mathcal{A}^- \Delta Q_{i-1/2} &= f(\widehat{Q}_{i-1/2}) - f(Q_{i-1}) \\ \mathcal{A}^+ \Delta Q_{i-1/2} &= f(Q_{i}) - f(\widehat{Q}_{i-1/2}) \\\end{aligned}$$1. Use the waves and speeds to define$$\begin{aligned} \mathcal{A}^- \Delta Q_{i-1/2} &= \sum^{M_w}_{p=1} (s^p_{i-1/2})^- \mathcal{W}^p_{i-1/2} \\ \mathcal{A}^+ \Delta Q_{i-1/2} &= \sum^{M_w}_{p=1} (s^p_{i-1/2})^+ \mathcal{W}^p_{i-1/2} \\\end{aligned}$$ The important realization here is that both of these approaches are the same for the all-shock Riemann solution. This also implies that unless we have a transonic rarefaction, i.e. a rarefaction fan contains $x/t = 0$ we do not need to worry about what the exact type of wave is but rather what middle state contains $x/t = 0$. Linearized Riemann SolversProbably the most natural way to find an approximate Riemann solver is to find some linearization of the problem that is appropriate for the conservation law such that for $q_t + f(q)_x = 0$ we can instead locally solve$$ \widehat{q}_t + \widehat{A}_{i-1/2} \widehat{q}_x = 0$$such that the matrix $\widehat{A}_{i-1/2}$ as an appropriate approximation to $f'(q)$ valid in the neighborhood of $x_{i-1/2}$. We also need to require that $\widehat{A}_{i-1/2}$ is diagonalizable with real eigenvalues so that we can have some sense that$$ \widehat{A}_{i-1/2} \rightarrow f'(\overline{q}) \quad \text{as } Q_{i-1}, Q_i \rightarrow \overline{q}$$for consistency. One of the reasons that we expect this to work is that for most Riemann solutions the shocks are well isolated. This implies that the difference from one cell to another$$ ||Q_i - Q_{i-1}|| = \mathcal{O}(\Delta x)$$as long as the jump is not large and therefore$$ f'(Q_{i-1}) \approx f'(Q_i).$$ We also know that if $||Q_i - Q_{i-1}|| = \mathcal{O}(\Delta x)$ that we expect that the Hugoniot loci and integral curves are similar to the eigenvectors of the system. The solution would then be similar to the linear hyperbolic systems we have studied before with the waves determined by the eigenvectors $\widehat{r}^p_{i-1/2}$ and speeds $s^p_{i-1/2} = \widehat{\lambda}^p_{i-1/2}$. This also allows us to easily identify the waves as$$ Q_i - Q_{i-1} = \sum^m_{p=1} \alpha^p_{i-1/2} \widehat{r}^p_{i-1/2}$$and therefore$$ \mathcal{W}^p_{i-1/2} = \alpha^p_{i-1/2} \widehat{r}^p_{i-1/2}.$$ There are of course multiple ways to form this linearized approximation. In general we could use$$ \widehat{A}_{i-1/2} = f'(\overline{Q}_{i-1/2})$$where $\overline{Q}_{i-1/2}$ is some appropriate "average state" dependent on $Q_i$ and $Q_{i-1}$. - What "average state" would you propose? - What properties of the solution might not work in general? 
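Whatever linearization is chosen, once $\widehat{A}_{i-1/2}$ is in hand the waves and fluctuations follow mechanically. The following is a minimal sketch only (not from any particular solver); the matrix `A_hat` and the states `q_l`, `q_r` are assumed inputs for a hyperbolic system with real eigenvalues.
###Code
import numpy

def linearized_fluctuations(A_hat, q_l, q_r):
    """Waves and fluctuations for a linearized Riemann solver."""
    lam, R = numpy.linalg.eig(A_hat)          # speeds s^p and eigenvectors r^p
    alpha = numpy.linalg.solve(R, q_r - q_l)  # wave strengths alpha^p
    waves = R * alpha                         # column p is W^p = alpha^p * r^p
    amdq = waves @ numpy.minimum(lam, 0.0)    # A^- Delta Q, the left-going part
    apdq = waves @ numpy.maximum(lam, 0.0)    # A^+ Delta Q, the right-going part
    return waves, lam, amdq, apdq
###Output
_____no_output_____
###Markdown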
The most obvious average is the true average of the values$$ \overline{Q}_{i-1/2} = \frac{1}{2} (Q_i + Q_{i-1}).$$Although this is consistent this does not imply that the method is consistent unless some form of $$ \widehat{Q}_{i-1/2} = Q_{i-1} + \sum_{p:s^p_{i-1/2} < 0} \mathcal{W}^p_{i-1/2}$$and $$\begin{aligned} \mathcal{A}^- \Delta Q_{i-1/2} &= f(\widehat{Q}_{i-1/2}) - f(Q_{i-1}) \\ \mathcal{A}^+ \Delta Q_{i-1/2} &= f(Q_{i}) - f(\widehat{Q}_{i-1/2}) \\\end{aligned}$$are satisfied. Unfortunately$$\begin{aligned} \mathcal{A}^- \Delta Q_{i-1/2} &= \sum^{M_w}_{p=1} (s^p_{i-1/2})^- \mathcal{W}^p_{i-1/2} \\ \mathcal{A}^+ \Delta Q_{i-1/2} &= \sum^{M_w}_{p=1} (s^p_{i-1/2})^+ \mathcal{W}^p_{i-1/2} \\\end{aligned}$$does not guarantee conservation unless we add an additional condition. The primary condition we need to ensure for conservation is actually$$ f(Q_i) - f(Q_{i-1}) = \sum^{M_w}_{p=1} s^p_{i-1/2} \mathcal{W}^p_{i-1/2},$$which in general is not satisfied for many different forms of $\overline{Q}_{i-1/2}$. Averaging $Q$ values is not the only approach, why not average the flux values with$$ \widehat{A}_{i-1/2} = \frac{1}{2}[f'(Q_{i-1}) + f'(Q_i)]$$or perhaps some other average of Jacobian evaluations. Unfortunately this also does not satisfy the jump in fluxes previously mentioned unless care is taken. Roe LinearizationOne of the keys to providing a robust linearization is to put some conditions on the linearization and its eigenspace. The first of these is:> If $Q_{i-1}$ and $Q_i$ are connected by a single wave $\mathcal{W}^p = Q_i - Q_{i-1}$ in the true Riemann solution, then $\mathcal{W}^p$ should also be an eigenvector of $\widehat{A}_{i-1/2}$.If this is true then the approximation will consist of a single wave that agrees with the exact Riemann solution with the strongest solution. Another way to say this is that if $Q_i$ and $Q_{i-1}$ are connected by a single wave, then$$ f(Q_i) - f(Q_{i-1}) = s (Q_i - Q_{i-1}).$$If the linearized problem also has this form then$$ \widehat{A}_{i-1/2} (Q_i - Q_{i-1}) = s (Q_i - Q_{i-1})$$implying$$ \widehat{A}_{i-1/2} (Q_i - Q_{i-1}) = f(Q_i) - f(Q_{i-1}).$$If this last expression is true then an approximate solver of this form is in fact conservative. This can also be shown via$$ \mathcal{A}^- \Delta Q_{i-1/2} + \mathcal{A}^+ \Delta Q_{i-1/2} = f(Q_i) - f(Q_{i-1}),$$which is implied by the above condition. Consequently the condition$$ \widehat{A}_{i-1/2} (Q_i - Q_{i-1}) = f(Q_i) - f(Q_{i-1})$$is often called **Roe's Condition**. The practical side of this is that we need to find an average that satisfies this condition. One way to do this is to think of the problem as finding a path through state space connecting $Q_i$ and $Q_{i-1}$ parameterized by$$ q(\xi) = Q_{i-1} + (Q_i - Q_{i-1}) \xi$$for $\xi \in [0, 1]$ and require it satisfy Roe's condition. Writing this out we then have$$\begin{aligned} f(Q_i) - f(Q_{i-1}) &= \int^1_0 \frac{\text{d}}{\text{d} \xi} f(q(\xi)) d\xi \\ &= \int^1_0 f'(q(\xi)) q'(\xi) d\xi \\ &= \left[ \int^1_0 f'(q(\xi)) d\xi \right ] (Q_i - Q_{i-1}).\end{aligned}$$ Recalling that we need $$ \widehat{A}_{i-1/2} (Q_i - Q_{i-1}) = f(Q_i) - f(Q_{i-1})$$this implies that $$ f(Q_i) - f(Q_{i-1}) = \left[ \int^1_0 f'(q(\xi)) d\xi \right ] (Q_i - Q_{i-1})$$gives us$$ \widehat{A}_{i-1/2} = \int^1_0 f'(q(\xi)) d\xi.$$ This unfortunately does not guarantee that the resulting matrix $\widehat{A}_{i-1/2}$ is diagonalizable with real eigenvalues. The integral itself can also be difficult to evaluate leaving us wanting a better approach. 
Instead Roe proposed a **parameter vector** $z(q)$, effectively a change of variables, that leads not only to easier evaluation of the integrals but also to evaluations that satisfies properties that we want. Here we now will integrate along the path$$ z(\xi) = Z_{i-1} + (Z_i - Z_{i-1}) \xi$$where $Z_j = z(Q_j)$ for $j=i-1, i$ and therefore $z'(\xi) = Z_i - Z_{i-1}$ that is independent of $\xi$. This implies$$\begin{aligned} f(Q_i) - f(Q_{i-1}) &= \int^1_0 \frac{\text{d}}{\text{d} \xi} f(z(\xi)) d\xi \\ &= \int^1_0 f'(z(\xi)) z'(\xi) d\xi \\ &= \left[ \int^1_0 f'(z(\xi)) d\xi \right ] (Z_i - Z_{i-1}).\end{aligned}$$ This expression we hope is easier to evaluate but we have no idea what this expression $z(\xi)$ really is yet. We can find this by rewriting $z(q)$ as$$\begin{aligned} f(Q_i) - f(Q_{i-1}) &= \widehat{C}_{i-1/2} (Z_i - Z_{i-1}) \\ Q_i - Q_{i-1} &= \widehat{B}_{i-1/2} (Z_i - Z_{i-1})\end{aligned}$$and therefore observing$$ \widehat{A}_{i-1/2} = \widehat{C}_{i-1/2} \widehat{B}^{-1}_{i-1/2}.$$ Harten and Lax showed that this approach will always be able to produce $\widehat{A}_{i-1/2}$ if the system has a convex entropy. One can actually also then choose $z(q) = \eta'(q)$.This being true we still want to ensure that the integrals of interest are easily evaluated, which is best shown by an example. Example: Roe Solver for Shallow Water$$ q = \begin{bmatrix} h \\ hu \end{bmatrix} = \begin{bmatrix} q^1 \\ q^2 \end{bmatrix} \quad f(q) = \begin{bmatrix} hu \\ hu^2 + \frac{1}{2} gh^2 \end{bmatrix} = \begin{bmatrix} q^2 \\ \frac{q^2}{(q^1)^2} + \frac{1}{2} g (q^1)^2 \end{bmatrix}$$and$$ f'(q) = \begin{bmatrix} 0 & 1 \\ -\left(\frac{q^2}{q^1} \right)^2 + g q^1 & 2 \frac{q^2}{q^1} \end{bmatrix} = \begin{bmatrix} 0 & 1 \\ -u^2 + g h & 2 u \end{bmatrix}$$Choose the parameterization as$$ z = h^{-1/2} q \quad \Rightarrow \quad \begin{bmatrix} z^1 \\ z^2 \end{bmatrix} = \begin{bmatrix} \sqrt{h} \\ u \sqrt{h} \end{bmatrix}$$See if you can carry this parameterization though and find $\widehat{A}_{i-1/2}$. Taking$$q(z) = \begin{bmatrix} (z^1)^2 \\ z^1 z^2 \end{bmatrix} \quad \Rightarrow \quad \frac{\partial q}{\partial z} = \begin{bmatrix} 2z^1 & 0 \\ z^2 & z^1 \end{bmatrix}$$and therefore$$ f(z) = \begin{bmatrix} z^1 z^2 \\ (x^2)^2 + \frac{1}{2} g (z^1)^4 \end{bmatrix} \quad \Rightarrow \quad \frac{\partial}{\partial z} f(z) = \begin{bmatrix} z^2 & z^1 \\ 2 g (z^1)^3 & 2 z^2 \end{bmatrix}$$ We now need to integrate from $\xi = 0 \ldots 1$ where$$ z^p = Z^p_{i-1} + (Z^p_i - Z^p_{i-1}) \xi.$$At this point all of our traverses through state space are linear except for one but we are still considering polynomials. 
Integrating the linear terms in our integrals leads us to$$ \int^1_0 z^p(\xi) d\xi = \frac{1}{2} (Z^p_{i-1} + Z^p_{i}) \equiv \overline{Z}^p,$$clearly just the average of the states of the transformed quantities $z(q)$.Integrating the higher order terms we have$$\begin{aligned} \int^1_0 (z^1(\xi))^3 d\xi &= \frac{1}{4} \left( \frac{(Z^1_i)^4 - (Z^1_{i-1})^4}{Z^1_i - Z^1_{i-1}} \right) \\ &= \frac{1}{2}(Z^1_{i-1} + Z^1_i) \cdot \frac{1}{2} \left [ (Z^1_{i-1})^2 + (Z^1_i)^2 \right ] \\ &= \overline{Z}^1 \overline{h},\end{aligned}$$where$$ \overline{h} = \frac{1}{2} (h_{i-1} + h_i).$$ From this we obtain$$ \widehat{B}_{i-1/2} = \begin{bmatrix} 2 \overline{Z}^1 & 0 \\ \overline{Z}^2 & \overline{Z}^1 \end{bmatrix}$$and$$ \widehat{C}_{i-1/2} = \begin{bmatrix} \overline{Z}^2 & \overline{Z}^1 \\ 2 g \overline{Z}^1 \overline{h} & 2 \overline{Z}^2 \end{bmatrix}.$$ $$ \widehat{B}_{i-1/2} = \begin{bmatrix} 2 \overline{Z}^1 & 0 \\ \overline{Z}^2 & \overline{Z}^1 \end{bmatrix} \quad \quad \widehat{C}_{i-1/2} = \begin{bmatrix} \overline{Z}^2 & \overline{Z}^1 \\ 2 g \overline{Z}^1 \overline{h} & 2 \overline{Z}^2 \end{bmatrix}$$Therefore$$ \widehat{A}_{i-1/2} = \widehat{C}_{i-1/2} \widehat{C}^{-1}_{i-1/2} = \begin{bmatrix} 0 & 1 \\ -\left(\frac{\overline{Z}^2}{\overline{Z}^1} \right)^2 + g \overline{h} & 2 \frac{\overline{Z}^2}{\overline{Z}^1} \end{bmatrix} = \begin{bmatrix} 0 & 1\\ -\widehat{u}^2 + g \overline{h} & 2 \widehat{u} \end{bmatrix}$$where$$ \overline{h} = \frac{1}{2} (h_{i-1} + h_i)$$and$$ \widehat{u} = \frac{\overline{Z}^2}{\overline{Z}^1} = \frac{u_{i-1} \sqrt{h_{i-1}} + u_i \sqrt{h_i}}{\sqrt{h_{i-1}} + \sqrt{h_i}}$$ Sonic Entropy FixesOne of the biggest drawbacks to a Roe linearized Riemann solver is that the solution formally only consists of shocks. Even in the scalar case the Roe condition can be satisfied by$$ \widehat{\mathcal{A}}_{i-1/2} = \frac{f(Q_i) - f(Q_{i-1})}{Q_i - Q_{i-1}}$$where here $\widehat{\mathcal{A}}_{i-1/2}$ is a scalar. This is of course the shock speed.As mentioned before numerically this is only a problem for transonic rarefactions where$$ f'(q_\ell) < 0 < f'(q_r)$$for the scalar case (these are of course the edges of the rarefaction wave).The same holds true for systems of equations when a particular wave is a transonic rarefaction. For the shallow water equations we can easily check if one of the two waves is a transonic rarefaction with the following computation:$$\begin{aligned} \lambda^1_{i-1} = u_{i-1} - \sqrt{g h_{i-1}} & & \lambda^1_m = u_m - \sqrt{g h_m} \\ \lambda^2_{m} = u_{m} - \sqrt{g h_{m}} & & \lambda^2_i = u_i - \sqrt{g h_i}.\end{aligned}$$Similar to the previous condition if any of these values in a row are separated by zero then we know we have a transonic rarefaction.The biggest impediment to using these conditions is that we need to know $q_m$. For simple systems this may not be too hard a burden as we know that if there is a transonic rarefaction there can be only one. Assuming there is one we can use the simplification that we need to know $\xi = x/t = 0$. 
For instance in the 1-rarefaction case we know$$\begin{aligned} \widehat{h}_{i-1/2} &= \frac{\left(u_{i-1} + 2 \sqrt{g h_{i-1}} \right)^2}{9g} \\ \widehat{u}_{i-1/2} &= u_{i-1} - 2 \left( \sqrt{g h_{i-1}} - \sqrt{g \widehat{h}_{i-1/2}} \right)\end{aligned}$$ Harten-Hyman Entropy FixA easier and more general approach to entropy fixes is due to Harten and Hyman and is generally the approach used in many Clawpack solvers.The principle approach is this, suppose that a transonic rarefaction exists in the $k$-family and therefore $$ \lambda^k_\ell < 0 < \lambda^k_r$$ and with$$\begin{aligned} q^k_\ell &= Q_{i-1} + \sum^{k-1}_{p=1} \mathcal{W}^p \\ q^k_r &= q_\ell^k + \mathcal{W}^k,\end{aligned}$$in other words the state to the left and right of the rarefaction. Now replace the single wave $\mathcal{W}^k$ propagating with speed $\widehat{\lambda}^l$ by two waves$$ \mathcal{W}^k_\ell = \beta \mathcal{W}^k \quad \mathcal{W}^k_r = (1 - \beta) \mathcal{W}^k$$propagating at speeds $\lambda^k_\ell$ and $\lambda^k_r$ respectively. Maintaining conservation requires$$ \lambda^l_\ell \mathcal{W}^k_\ell + \lambda^k_r \mathcal{W}^k_r = \widehat{\lambda}^k \mathcal{W}^k$$and therefore$$ \beta = \frac{\lambda^k_r - \widehat{\lambda}^k}{\lambda^k_r - \lambda^k_\ell}.$$This amounts to splitting the wave into two pieces traveling to the left and right and therefore modifying the fluctuations $\mathcal{A}^\pm \Delta Q$. Numerical ViscosityOne way to view the entropy problem as mentioned before is that not enough viscosity is being introduced into the solution. Numerical viscosity can solve this for us and we can modify Roe's linearization to account for this. The numerical flux for Roe's method is$$\begin{aligned} F_{i-1/2} &= \frac{1}{2} [f(Q_{i-1}) + f(Q_i) ] - \frac{1}{2} \left | \widehat{A}_{i-1/2} \right | (Q_i - Q_{i-1}) \\ &=\frac{1}{2} [f(Q_{i-1}) + f(Q_i) ] - \frac{1}{2} \sum_p \left | \widehat{\lambda}^p_{i-1/2} \right | \mathcal{W}^p_{i-1/2}.\end{aligned}$$The sum in the last expression can be looked upon as being a form of viscosity.If a transonic rarefaction is present then we expect that one of the eigenvalues $\widehat{\lambda}^p_{i-1/2}$ is very close to zero and the corresponding term in the last sum will see very little viscosity. This is in fact often what we observe, a stationary shock where there should be none since the corresponding speed is identically zero.
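To make the shallow water formulas above concrete, here is a minimal sketch (illustrative names only, with $g$ defaulting to 1) of the Roe averages, speeds and waves, together with a Harten-Hyman style splitting of a wave that spans a transonic rarefaction; the caller is assumed to supply the characteristic speeds `lam_l` and `lam_r` evaluated just to the left and right of that wave.
###Code
import numpy

def roe_swe_waves(h_l, hu_l, h_r, hu_r, g=1.0):
    """Roe averages, speeds and waves for the shallow water equations."""
    u_l, u_r = hu_l / h_l, hu_r / h_r
    h_bar = 0.5 * (h_l + h_r)                            # arithmetic mean depth
    u_hat = (u_l * numpy.sqrt(h_l) + u_r * numpy.sqrt(h_r)) \
            / (numpy.sqrt(h_l) + numpy.sqrt(h_r))        # Roe-averaged velocity
    c_hat = numpy.sqrt(g * h_bar)
    s = numpy.array([u_hat - c_hat, u_hat + c_hat])      # Roe speeds
    R = numpy.array([[1.0, 1.0], [s[0], s[1]]])          # eigenvectors [1, s^p]
    alpha = numpy.linalg.solve(R, numpy.array([h_r - h_l, hu_r - hu_l]))
    return s, R * alpha                                  # speeds and waves W^1, W^2

def harten_hyman_split(wave, s_hat, lam_l, lam_r):
    """Split a transonic wave into left- and right-going pieces."""
    if lam_l < 0.0 < lam_r:
        beta = (lam_r - s_hat) / (lam_r - lam_l)
        return [(lam_l, beta * wave), (lam_r, (1.0 - beta) * wave)]
    return [(s_hat, wave)]    # not transonic, keep the single wave
###Output
_____no_output_____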
###Code
def true_solution(x, t):
if t > 0:
t_vec = t * numpy.ones(x.shape)
return (x < 0) * -numpy.ones(x.shape) + \
(-t_vec < x) * (x <= 0) * (x / t_vec + 1) + \
(0 <= x) * (x <= 2*t_vec) * x / t_vec + \
(2 * t_vec <= x) * 2.0 * numpy.ones(x.shape)
else:
return (x < 0) * -numpy.ones(x.shape) + \
(0.0 <= x) * 2.0 * numpy.ones(x.shape)
def burgers_animation(order=2, efix=True):
solver = pyclaw.ClawSolver1D(riemann.burgers_1D_py.burgers_1D)
solver.kernel_language = "Python"
solver.limiters = pyclaw.limiters.tvd.MC
solver.bc_lower[0] = pyclaw.BC.extrap
solver.bc_upper[0] = pyclaw.BC.extrap
solver.order = order
x = pyclaw.Dimension(-3.0, 3.0, 50, name='x')
domain = pyclaw.Domain(x)
num_eqn = 1
state = pyclaw.State(domain, num_eqn)
xc = domain.grid.x.centers
state.q[0,:] = (xc < 0) * -numpy.ones(xc.shape) + 2.0 * (xc >= 0) * numpy.ones(xc.shape)
state.problem_data['efix'] = efix
claw = pyclaw.Controller()
claw.tfinal = 1.0
claw.num_output_times = 10
claw.solution = pyclaw.Solution(state,domain)
claw.solver = solver
claw.keep_copy = True
claw.run()
x = claw.frames[0].grid.dimensions[0].centers
fig = plt.figure()
axes = plt.subplot(1, 1, 1)
axes.set_xlim((x[0], x[-1]))
axes.set_ylim((-1.5, 2.5))
axes.set_title("Burgers Equation")
def init():
axes.set_xlim((x[0], x[-1]))
axes.set_ylim((-1.5, 2.5))
computed_line, = axes.plot(x[0], claw.frames[0].q[0, :][0], 'bo-')
true_line, = axes.plot(x[0], claw.frames[0].q[0, :][0], 'k-')
return (computed_line, true_line)
computed_line, true_line = init()
def fplot(n):
computed_line.set_data([x,], [claw.frames[n].q[0, :]])
true_line.set_data([x,], [true_solution(x, claw.frames[n].t)])
return (computed_line, true_line)
frames_to_plot = range(0, len(claw.frames))
plt.close(fig)
return matplotlib.animation.FuncAnimation(fig, fplot, frames=frames_to_plot, interval=100,
blit=True, init_func=init, repeat=False)
HTML(burgers_animation(order=1, efix=False).to_jshtml())
###Output
_____no_output_____
###Markdown
If we implement the above idea instead using the wave-propagation formulation and $\mathcal{A}^\pm \Delta Q$ we get an additional detail with the numerical flux written as$$ F_{i-1/2} = \frac{1}{2} [f(Q_{i-1}) + f(Q_i) ] - \frac{1}{2} \sum_p \left [ (\widehat{\lambda}^p_{i-1/2})^+ - (\widehat{\lambda}^p_{i-1/2})^- \right ] \mathcal{W}^p_{i-1/2}$$that allows us to apply the Harten-Hyman entropy fix. Harten's Entropy FixAnother entropy fix proposed by Harten is based on increasing the viscosity only by modifying the field that contains the eigenvalue that may be too close to zero. This follows that we replace $|\widehat{\lambda}^p_{i-1/2})|$ by a limited value$$ \phi_\delta(\widehat{\lambda}^p_{i-1/2}))$$where$$ \phi_\delta(\lambda) = \left \{ \begin{aligned} &|\lambda| & & \text{if } \lambda \geq \delta \\ &\frac{\lambda^2 + \delta^2}{2 \delta} & & \text{if } \lambda < \delta \end{aligned} \right .$$which effectively changes the absolute value in the original Roe flux to be perturbed from zero. Unfortunately this approach requires tuning the parameter $\delta$ for each problem. Failure of Linearized SolversLinearized solvers can be a powerful way to reduce the computational cost of finite volume solvers but when might they go wrong? One of the most common happens near "vacuum states", states where one of the conserved quantities goes to zero. For the Euler equations this occurs when $\rho \rightarrow 0$ and in the shallow water equations when $h \rightarrow 0$. For both of these cases we require $\rho, h \geq 0$. So what goes wrong? We have assumed that the eigenvectors will intersect somewhere similar to where the true Hugoniot loci or integral curves intersect. HLL and HLLE SolversAnother approach to an approximate Riemann solver uses only two waves regardless of the true number of waves. This involves estimating the waves that form the edges of the Riemann fan and using these waves with one intermediate state. Define the two waves now as$$ \mathcal{W}^1_{i-1/2} = \widehat{Q}_{i-1/2} - Q_{i-1} \quad \mathcal{W}^2_{i-1/2} = Q_{i} - \widehat{Q}_{i-1/2}$$where $\widehat{Q}_{i-1/2}$ is the middle state. Requiring conservation we want these waves to satisfy$$\begin{aligned} f(Q_i) - f(Q_{i-1}) &= \sum^2_{p=1} s^p_{i-1/2} \mathcal{W}^p_{i-1/2} \\ &= s^1_{i-1/2} \mathcal{W}^1_{i-1/2} + s^2_{i-1/2} \mathcal{W}^2_{i-1/2} \\ &= s^1_{i-1/2} (\widehat{Q}_{i-1/2} - Q_{i-1}) + s^2_{i-1/2} (Q_{i} - \widehat{Q}_{i-1/2})\end{aligned}$$implying$$ \widehat{Q}_{i-1/2} = \frac{f(Q_i) - f(Q_{i-1}) - s^2_{i-1/2} Q_i + s^1_{i-1/2} Q_{i-1}}{s^1_{i-1/2} - s^2_{i-1/2}}.$$This approach was originally suggested by Harten, Lax and Van Lear with Einfeldt suggesting a choice of $s^1$ and $s^2$ of$$\begin{aligned} s^1_{i-1/2} &= \min_p \left( \min \left(\lambda^p_i, \widehat{\lambda}^p_{i-1/2} \right ) \right ) \\ s^2_{i-1/2} &= \max_p \left( \max \left(\lambda^p_{i+1}, \widehat{\lambda}^p_{i-1/2} \right ) \right )\end{aligned}$$where $\lambda^p_j$ is the $p$th eigenvalue of the Jacobian $f'(Q_j)$ and $\widehat{\lambda}^p_{i-1/2}$ is the $p$th eigenvalue of the Roe average values. Note that this choice of speeds reduces to the Roe approximation when the waves chosen are shocks. In the case where these are rarefactions these speeds will take the leading edge of the rarefaction.The fact however that we are only using two waves to represent the full Riemann fan has an obvious disadvantage if you want the details of the Riemann problem to be used. 
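The two-wave construction is simple enough to sketch directly. In the following, `f` is the flux function and `s1`, `s2` are the chosen speed estimates (for example the Einfeldt choices above); all three are assumed to be supplied by the caller.
###Code
def hll_middle_state(q_l, q_r, f, s1, s2):
    """Middle state and waves for a two-wave (HLL-type) approximate solver."""
    q_hat = (f(q_r) - f(q_l) - s2 * q_r + s1 * q_l) / (s1 - s2)
    wave_1 = q_hat - q_l    # W^1, propagating at speed s1
    wave_2 = q_r - q_hat    # W^2, propagating at speed s2
    return q_hat, wave_1, wave_2
###Output
_____no_output_____
###Markdown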
High-Resolution MethodsWe can also extend Godunov's method to the high-resolution methods already discussed and are essentially the same as for linear systems. The method we studied already takes the form$$ Q^{n+1}_i = Q^n_i - \frac{\Delta t}{\Delta x} \left( \mathcal{A}^-_{i+1/2} + \mathcal{A}^+ \Delta Q_{i-1/2} \right )- \frac{\Delta t}{\Delta x} \left( \widetilde{F}_{i+1/2} - \widetilde{F}_{i-1/2} \right )$$with$$ \widetilde{F}_{i-1/2} = \frac{1}{2} \sum^{M_w}_{p=1} |s^p_{i-1/2} | \left ( 1- \frac{\Delta t}{\Delta x} |s^p_{i-1/2}| \right) \widetilde{\mathcal{W}}^p_{i-1/2}$$where $\widetilde{\mathcal{W}}^p_{i-1/2}$ is a limited version of $\mathcal{W}^p_{i-1/2}$. There are several complications for nonlinear systems with this approach. For shock waves the general approach still works but if a wave is a rarefaction the definition of the speed is less clear. In practice $$ s^p = \frac{1}{2} (\lambda^p_\ell + \lambda^p_r)$$is often used. The limiters can also be problematic as waves from neighboring grid cells edges may not be collinear so it is not clear that comparing the magnitude of these vectors is not clearly the right thing to do. This similar to variable-coefficient linear systems and can be addressed similarly.In `clawpack` the general approach is to project the neighboring waves onto the wave being limited to obtain a vector that can be directly compared.
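As an illustration of this projection idea (a sketch only, not the actual Clawpack routine), the upwind neighbor's wave can be projected onto the wave being limited to obtain a scalar ratio, which is then fed to a standard limiter, MC here.
###Code
import numpy

def mc_limiter(theta):
    return numpy.maximum(0.0, numpy.minimum((1.0 + theta) / 2.0,
                                            numpy.minimum(2.0, 2.0 * theta)))

def limit_wave(wave, upwind_wave):
    """Limit a wave using the projection of the upwind wave onto it."""
    wave_norm2 = numpy.dot(wave, wave)
    if wave_norm2 == 0.0:
        return wave
    theta = numpy.dot(upwind_wave, wave) / wave_norm2   # scalar comparison ratio
    return mc_limiter(theta) * wave
###Output
_____no_output_____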
###Code
def shock_tube(riemann_solver, efix=False):
solver = pyclaw.ClawSolver1D(riemann_solver)
solver.kernel_language = "Python"
solver.num_waves = 3
solver.bc_lower[0] = pyclaw.BC.wall
solver.bc_upper[0] = pyclaw.BC.wall
x = pyclaw.Dimension(-1.0, 1.0, 800, name='x')
domain = pyclaw.Domain([x])
state = pyclaw.State(domain, 3)
# Ratio of specific heats
gamma = 1.4
state.problem_data['gamma'] = gamma
state.problem_data['gamma1'] = gamma - 1.0
state.problem_data['efix'] = efix
x = state.grid.x.centers
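    # Sod-type shock tube data: higher density and pressure on the left half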
rho_l = 1.; rho_r = 1./8
p_l = 1.; p_r = 0.1
state.q[0 ,:] = (x<0.)*rho_l + (x>=0.)*rho_r
state.q[1,:] = 0.
velocity = state.q[1, :] / state.q[0,:]
pressure = (x<0.)*p_l + (x>=0.)*p_r
state.q[2 ,:] = pressure / (gamma - 1.) + 0.5 * state.q[0,:] * velocity**2
claw = pyclaw.Controller()
claw.tfinal = 0.4
claw.solution = pyclaw.Solution(state,domain)
claw.solver = solver
claw.num_output_times = 10
claw.keep_copy = True
claw.run()
fig, axes = plt.subplots(1, 2)
fig.set_figwidth(fig.get_figwidth() * 2)
def init():
density_line, = axes[0].plot(x[0], claw.frames[0].q[0, :][0], 'k')
axes[0].set_title(r"Density $\rho$")
axes[0].set_xlim((-1, 1))
axes[0].set_ylim((-0.1, 1.25))
energy_line, = axes[1].plot(x[0], claw.frames[0].q[2, :][0], 'k')
axes[1].set_title(r"Energy $E$")
axes[1].set_xlim((-1, 1))
axes[1].set_ylim((-0.1, 4.0))
return (density_line, energy_line)
density_line, energy_line = init()
def fplot(n):
density_line.set_data([x,], [claw.frames[n].q[0, :]])
energy_line.set_data([x,], [claw.frames[n].q[2, :]])
axes[0].set_title(r"$\rho$ at $t = %s$" % claw.frames[n].t)
axes[1].set_title(r"$E$ at $t = %s$" % claw.frames[n].t)
return (density_line, energy_line)
frames_to_plot = range(0, len(claw.frames))
plt.close(fig)
return matplotlib.animation.FuncAnimation(fig, fplot, frames=frames_to_plot, interval=100,
blit=True, init_func=init, repeat=False)
HTML(shock_tube(riemann.euler_1D_py.euler_hllc_1D, efix=True).to_jshtml())
def woodward_colella_blast(riemann_solver):
solver = pyclaw.ClawSolver1D(riemann_solver)
solver.kernel_language = "Python"
solver.num_waves = 3
solver.limiters = 4
solver.bc_lower[0] = pyclaw.BC.wall
solver.bc_upper[0] = pyclaw.BC.wall
x = pyclaw.Dimension(0.0, 1.0, 800, name='x')
domain = pyclaw.Domain([x])
state = pyclaw.State(domain, 3)
# Ratio of specific heats
gamma = 1.4
state.problem_data['gamma'] = gamma
state.problem_data['gamma1'] = gamma - 1.0
x = state.grid.x.centers
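    # Woodward-Colella blast wave data: strong pressure jumps near both walls, near-vacuum pressure in between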
state.q[0, :] = 1.0
state.q[1, :] = 0.0
state.q[2, :] = ( (x < 0.1) * 1.e3
+ (0.1 <= x) * (x < 0.9) * 1.e-2
+ (0.9 <= x) * 1.e2 ) / (gamma - 1.0)
claw = pyclaw.Controller()
claw.tfinal = 0.05
claw.solution = pyclaw.Solution(state,domain)
claw.solver = solver
claw.num_output_times = 20
claw.keep_copy = True
claw.run()
fig, axes = plt.subplots(1, 2)
fig.set_figwidth(fig.get_figwidth() * 2)
axes[0].set_title(r"Density $\rho$")
axes[0].set_xlim((0, 1))
axes[0].set_ylim((-0.1, 15.0))
axes[1].set_title(r"Energy $E$")
axes[1].set_xlim((0, 1))
axes[1].set_ylim((-0.1, 2600.0))
def init():
density_line, = axes[0].plot(x[0], claw.frames[0].q[0, :][0], 'k')
axes[0].set_title(r"Density $\rho$")
axes[0].set_xlim((0, 1))
axes[0].set_ylim((-0.1, 15.0))
energy_line, = axes[1].plot(x[0], claw.frames[0].q[2, :][0], 'k')
axes[1].set_title(r"Energy $E$")
axes[1].set_xlim((0, 1))
axes[1].set_ylim((-0.1, 2600.0))
return (density_line, energy_line)
density_line, energy_line = init()
def fplot(n):
density_line.set_data([x,], [claw.frames[n].q[0, :]])
energy_line.set_data([x,], [claw.frames[n].q[2, :]])
axes[0].set_title(r"$\rho$ at $t = %s$" % claw.frames[n].t)
axes[1].set_title(r"$E$ at $t = %s$" % claw.frames[n].t)
return (density_line, energy_line)
frames_to_plot = range(0, len(claw.frames))
plt.close(fig)
return matplotlib.animation.FuncAnimation(fig, fplot, frames=frames_to_plot, interval=100,
blit=True, init_func=init, repeat=False)
# HTML(woodward_colella_blast(riemann.euler_1D_py.euler_roe_1D).to_jshtml())
# HTML(woodward_colella_blast(riemann.euler_1D_py.euler_hll_1D).to_jshtml())
HTML(woodward_colella_blast(riemann.euler_1D_py.euler_hllc_1D).to_jshtml())
###Output
_____no_output_____
###Markdown
Alternative Wave-Propagation Implementations for Approximate Riemann SolversA sometimes useful alternative approach to splitting the jump $Q_i - Q_{i-1}$ into waves is to instead split the jump in the fluxes with$$ f(Q_i) - f(Q_{i-1}) = \sum^{M_w}_{p=1} \mathcal{Z}^p_{i-1/2}.$$This alternative is useful in developing approximate solvers, handling source terms, and showing second order accuracy. The advantage of this approach to linearized solvers is that we are automatically satisfying Roe's condition. Assuming we have a linearized problem we can then project the jump in fluxes onto the eigenspace$$ f(Q_i) - f(Q_{i-1}) = \sum^m_{p=1} \beta^p_{i-1/2} \widehat{r}^p_{i-1/2}$$and then defining the **f-waves** as$$ \mathcal{Z}^p_{i-1/2} = \beta^p_{i-1/2} \widehat{r}^p_{i-1/2}.$$We can also define the fluctuations as$$\begin{aligned} &\widehat{\mathcal{A}}^- \Delta Q_{i-1/2} = \sum_{p:s^p_{i-1/2} < 0} \mathcal{Z}^p_{i-1/2} \\ &\widehat{\mathcal{A}}^+ \Delta Q_{i-1/2} = \sum_{p:s^p_{i-1/2} > 0} \mathcal{Z}^p_{i-1/2}\end{aligned}$$Implying that Roe's method is satisfied regardless of the linearization employed. For example the arithmetic average defined linearization$$ \widehat{\mathcal{A}}_{i-1/2} = f'\left(\frac{1}{2}(Q_{i} + Q_{i-1})\right)$$will produce a conservative method where as the original wave-propagation method may not. We can also relate the types of waves, if all speeds $s^p_{i-1/2}$ are nonzero then$$ \mathcal{W}^p_{i-1/2} = \frac{1}{s^p_{i-1/2}} \mathcal{Z}^p_{i-1/2}.$$ The second order correction terms are also slightly different. The flux used should not be$$ \widetilde{F}_{i-1/2} = \frac{1}{2} \sum^{M_w}_{p=1} \text{sgn}(s^p_{i-1/2}) \left( 1- \frac{\Delta t}{\Delta x} |s^p_{i-1/2}| \right) \widetilde{\mathcal{Z}}^p_{i-1/2}.$$ Second-Order AccuracyOne thing we have not yet discussed is whether the method we have proposed is truly second-order accurate for smooth solutions when limiters are not used. We know that the scalar theory does imply this but does it extend to systems? First we must compute the local truncation error keeping in mind we are assuming that the solution is smooth at this time. We will of course use Taylor series for this and desire to replace some of the terms of $$ q(x_i, t_{n+1}) = q(x_i, t_n) - \Delta t f(q)_x + \frac{\Delta t^2}{2} q(x_i, t_n)_{tt} + \mathcal{O}(\Delta t^3).$$For the conservation law we can compute the second time derivative so that$$\begin{aligned} q_t & = -f(q)_x \\ q_{tt} &= -(f'(q) q_t)_x = [f'(q) f(q)_x]_x\end{aligned}$$implying that$$ q(x_i, t_{n+1}) = q(x_i, t_n) - \Delta t f(q)_x + \frac{\Delta t^2}{2} [f'(q) f(q)_x]_x + \mathcal{O}(\Delta t^3).$$ We now assume that the method uses the f-wave approach, splitting the jump in fluxes$$ f(Q_i) - f(Q_{i-1}) = \sum^m_{p=1} \mathcal{Z}^p_{i-1/2}$$where $\mathcal{Z}^p_{i-1/2}$ are assumed to be eigenvectors of some matrix $\widehat{A}_{i-1/2}(Q_i, Q_{i-1})$. We will also use the definition$$ \mathcal{Z}^p_{i-1/2} = s^p_{i-1/2} \mathcal{W}^p_{i-1/2}$$to relate this to the original wave-propagation method. We must now make a couple of assumption about the consistency of $\widehat{A}_{i-1/2}$ with the Jacobian. 
This takes the form of$$ \widehat{A}_{i-1/2}(q(x), q(x + \Delta x)) = f'(q(x + \Delta x / 2)) + E(x, \Delta x)$$where the error satisfies$$ E(x, \Delta x) = \mathcal{O}(\Delta x)$$and$$ \frac{E(x + \Delta x, \Delta x) - E(x, \Delta x)}{\Delta x} = \mathcal{O}(\Delta x).$$In the end we then want$$ \widehat{A}(q(x), q(x + \Delta x)) = f'(q(x + \Delta x/2)) + \mathcal{O}(\Delta x^2)$$and therefore we can choose$$ \widehat{A}(Q_{i}, Q_{i-1}) = f'(\widehat{Q}_{i-1/2}).$$Note that this also implies that $\widehat{A}$ need only be a first order accurate approximation to $f'(q)$ at the midpoint. Now for the fun part, writing out the update in all of its "glory":$$\begin{aligned} Q^{n+1}_i &= Q^n_i - \frac{\Delta t}{\Delta x} \left[ \sum_{p:s^p_{i-1/2} > 0} \mathcal{Z}^p_{i-1/2} + \sum_{p:s^p_{i-1/2} > 0} \mathcal{Z}^p_{i+1/2} \right] - \frac{\Delta t}{2 \Delta x} \left[ \sum^m_{p=1} \text{sgn}(s^p_{i+1/2}) \left(1 - \frac{\Delta t}{\Delta x} |s^p_{i+1/2}| \right) \mathcal{Z}^p_{i+1/2} - \sum^m_{p=1} \text{sgn}(s^p_{i-1/2}) \left(1 - \frac{\Delta t}{\Delta x} |s^p_{i-1/2}| \right) \mathcal{Z}^p_{i-1/2}\right] \\ &= Q^n_i - \frac{\Delta t}{2 \Delta x} \left[ \sum^m_{p=1} \mathcal{Z}^p_{i-1/2} + \sum_{p=1} \mathcal{Z}^p_{i+1/2} \right] + \frac{\Delta t^2}{2 \Delta x^2} \left[\sum_{p=1} s^p_{i+1/2} \mathcal{Z}^p_{i+1/2} - \sum^m_{p=1} s^p_{i-1/2} \mathcal{Z}^p_{i-1/2} \right] \\ &= Q^n_i - \frac{\Delta t}{2 \Delta x} \left[ \sum^m_{p=1} \mathcal{Z}^p_{i-1/2} + \sum_{p=1} \mathcal{Z}^p_{i+1/2} \right] + \frac{\Delta t^2}{2 \Delta x^2} \left[ \widehat{A}_{i+1/2} \sum_{p=1} \mathcal{Z}^p_{i+1/2} - \widehat{A}_{i-1/2} \sum^m_{p=1}\mathcal{Z}^p_{i-1/2} \right].\end{aligned}$$ Using the continuity assumption then leads to $$ Q^{n+1}_i = Q^n_i - \frac{\Delta t}{2 \Delta x} [f(Q_{i+1}) - f(Q_{i-1})] - \frac{\Delta t^2}{2 \Delta x^2} \left \{ \widehat{A}_{i+1/2} [f(Q_{i+1}) - f(Q_i)] - \widehat{A}_{i-1/2} [f(Q_i) - f(Q_{i-1})] \right \},$$which agrees with the Taylor series$$ q(x_i, t_{n+1}) = q(x_i, t_n) - \Delta t f(q)_x + \frac{\Delta t^2}{2} [f'(q) f(q)_x]_x + \mathcal{O}(\Delta t^3).$$to the required accuracy. Total Variation for SystemsAs mentioned previously the notion of TV-stability cannot naturally be extended to systems and therefore there is no proof that even Godunov's method converges for general systems of nonlinear conservation laws. The situation is actually worse than that, in general there is no proof of an existence of a solution for general nonlinear systems. It bears therefore some merit in delving into what has been done and why TV-stability fails in this situation. One might try to define TV for a system as the following:$$ TV(q) = \sup \sum^N_{j=1} ||q(\xi_j) - q(\xi_{j-1})||$$with some arbitrary discretization of the domain. If we restrict our attention to piecewise constant grid functions on the full real-line then this reduces to$$ TV(Q) = \sum^\infty_{i=-\infty} ||Q_i - Q_{i-1}||.$$We could hope that if the above definitions hold that we could prove a similar type of stability and therefore convergence.However we run into a problem as the true solution itself is not TVD. In fact we can choose initial conditions that can cause the TV(Q) to arbitrarily grow (but finite).
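Two of the quantities above are easy to sketch in code: the fluctuations built from f-waves for some linearization $\widehat{A}_{i-1/2}$, and the total variation of a piecewise constant vector grid function. Both sketches below are illustrative only; `A_hat` is an assumed input with real eigenvalues and `Q` is assumed to have shape `(num_eqn, num_cells)`.
###Code
import numpy

def fwave_fluctuations(A_hat, f_l, f_r):
    """Fluctuations assembled from the f-waves Z^p = beta^p r^p."""
    lam, R = numpy.linalg.eig(A_hat)
    beta = numpy.linalg.solve(R, f_r - f_l)   # f-wave strengths beta^p
    fwaves = R * beta                         # column p is Z^p
    amdq = fwaves[:, lam < 0].sum(axis=1)     # sum of the left-going f-waves
    apdq = fwaves[:, lam > 0].sum(axis=1)     # sum of the right-going f-waves
    return fwaves, lam, amdq, apdq

def total_variation(Q):
    """TV(Q) = sum over i of ||Q_i - Q_{i-1}|| using the Euclidean norm."""
    return numpy.sum(numpy.linalg.norm(numpy.diff(Q, axis=1), axis=0))
###Output
_____no_output_____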
###Code
def swe_rp(h, u=[0.0, 0.0], N=100, ylimits=((0.0, 3.5), (-0.5, 2))):
solver = pyclaw.ClawSolver1D(riemann.shallow_1D_py.shallow_fwave_1d)
solver.kernel_language = "Python"
solver.num_waves = 2
solver.num_eqn = 2
solver.fwave = True
solver.limiters = [pyclaw.limiters.tvd.MC,
pyclaw.limiters.tvd.MC]
solver.bc_lower[0] = pyclaw.BC.extrap
solver.bc_upper[0] = pyclaw.BC.extrap
solver.aux_bc_lower[0] = pyclaw.BC.extrap
solver.aux_bc_upper[0] = pyclaw.BC.extrap
x = pyclaw.Dimension(-5.0, 5.0, N, name='x')
domain = pyclaw.Domain(x)
state = pyclaw.State(domain, 2, 1)
xc = domain.grid.x.centers
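    # Piecewise-constant Riemann data: depth h and velocity u jump across x = 0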
state.q[0,:] = h[0] * (xc < 0) * numpy.ones(xc.shape) + h[1] * (xc >= 0) * numpy.ones(xc.shape)
state.q[1,:] = u[0] * (xc < 0) * numpy.ones(xc.shape) + u[1] * (xc >= 0) * numpy.ones(xc.shape)
state.q[1,:] *= state.q[0, :]
state.aux[0, :] = numpy.zeros(xc.shape)
state.problem_data['grav'] = 1.0
state.problem_data['dry_tolerance'] = 1e-3
state.problem_data['sea_level'] = 0.0
claw = pyclaw.Controller()
claw.tfinal = 2.0
claw.num_output_times = 10
claw.solution = pyclaw.Solution(state,domain)
claw.solver = solver
claw.keep_copy = True
claw.run()
x = claw.frames[0].grid.dimensions[0].centers
fig, axes = plt.subplots(1, 2)
fig.set_figwidth(fig.get_figwidth() * 2)
axes[0].set_xlim((x[0], x[-1]))
axes[0].set_ylim(ylimits[0])
axes[0].set_title(r"$h$")
axes[1].set_xlim((x[0], x[-1]))
axes[1].set_ylim(ylimits[1])
axes[1].set_title(r"$hu$")
def init():
axes[0].set_xlim((x[0], x[-1]))
axes[0].set_ylim(ylimits[0])
h_line, = axes[0].plot(x[0], claw.frames[0].q[0, :][0], 'bo-')
axes[1].set_xlim((x[0], x[-1]))
axes[1].set_ylim(ylimits[1])
hu_line, = axes[1].plot(x[0], claw.frames[0].q[1, :][0], 'bo-')
return (h_line, hu_line)
h_line, hu_line = init()
def fplot(n):
h_line.set_data([x,], [claw.frames[n].q[0, :]])
hu_line.set_data([x,], [claw.frames[n].q[1, :]])
axes[0].set_title(r"$h$ at $t = %s$" % claw.frames[n].t)
axes[1].set_title(r"$hu$ at $t = %s$" % claw.frames[n].t)
return (h_line, hu_line)
frames_to_plot = range(0, len(claw.frames))
plt.close(fig)
return matplotlib.animation.FuncAnimation(fig, fplot, frames=frames_to_plot, interval=100,
blit=True, init_func=init, repeat=False)
HTML(swe_rp(h=[1, 1], u=[1, -1], ylimits=((0, 2.6), (-1.1, 1.1))).to_jshtml())
###Output
_____no_output_____ |
Assignment 6.ipynb | ###Markdown
5.a
###Code
import numpy as np

def f(x):
return (1/np.sqrt(2*np.pi)) * (np.exp(-np.square(x)/2))
# Generate random numbers
randoms_40000_1 = np.random.uniform(low=0, high=1, size=(40000))
randoms_40000_2 = np.random.uniform(low=0, high=1, size=(40000))
randoms_40000_3 = np.random.uniform(low=0, high=1, size=(40000))
# Calculate f
f_randoms_40000_1 = f(randoms_40000_1)
f_randoms_40000_2 = f(randoms_40000_2)
f_randoms_40000_3 = f(randoms_40000_3)
# Calculate means
I_40000_1 = np.mean(f_randoms_40000_1)
I_40000_2 = np.mean(f_randoms_40000_2)
I_40000_3 = np.mean(f_randoms_40000_3)
# Print values
print(f"I_40000_1 = {I_40000_1}")
print(f"I_40000_2 = {I_40000_2}")
print(f"I_40000_3 = {I_40000_3}")
###Output
I_40000_1 = 0.3410851456229389
I_40000_2 = 0.34130154668424395
I_40000_3 = 0.34155179379915684
###Markdown
5.c
###Code
# Generate random points
x1 = np.random.uniform(low=0, high=1, size=(40000))
y1 = np.random.uniform(low=0, high=1, size=(40000))
x2 = np.random.uniform(low=0, high=1, size=(40000))
y2 = np.random.uniform(low=0, high=1, size=(40000))
x3 = np.random.uniform(low=0, high=1, size=(40000))
y3 = np.random.uniform(low=0, high=1, size=(40000))
# Calculate proportion with y <= f(x)
num_y_less_than_fx1 = np.where(y1 <= f(x1))[0].size
num_y_less_than_fx2 = np.where(y2 <= f(x2))[0].size
num_y_less_than_fx3 = np.where(y3 <= f(x3))[0].size
proportion_y_less_than_fx1 = num_y_less_than_fx1 / x1.size
proportion_y_less_than_fx2 = num_y_less_than_fx2 / x2.size
proportion_y_less_than_fx3 = num_y_less_than_fx3 / x3.size
# Print values
print(f"proportion_y_less_than_fx1 = {proportion_y_less_than_fx1}")
print(f"proportion_y_less_than_fx2 = {proportion_y_less_than_fx2}")
print(f"proportion_y_less_than_fx3 = {proportion_y_less_than_fx3}")
###Output
proportion_y_less_than_fx1 = 0.34635
proportion_y_less_than_fx2 = 0.347375
proportion_y_less_than_fx3 = 0.3459
###Markdown
Omphemetse Mangope Advanced Machine Learning Assignment 6: Training Neural net using Negative Loglikelihood Due Date: 17 June 2020
###Code
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import scale
from sklearn.model_selection import train_test_split
import numpy as np
data = pd.read_csv('weightdataset.csv',sep=';')
data.head(2)
x = scale(data.Weight) # Data scaling
y = scale(data.Height) # Data Scaling
w_input = np.random.random(2) # Randomly assigning weights
x_train, x_test,y_train, y_test = train_test_split(x,y, test_size=0.3) # Splitting data into 30% test and 70% train
###Output
_____no_output_____
###Markdown
Functions
###Code
def des(x): # Design matrix for x values
n = len(x)
X = np.c_[np.ones(n), x]
return(X)
# Please note: since the bias term is 1, it is treated as an extra x column here,
# which is just a column of ones throughout all observations
def hidden(p,w_in): # hidden node computation
h = p.dot(w_in)
return(h)
def sigmoid(x): # Sigmoid Function
return(1/(1 + np.exp(-x)))
def der_sigmoid(x):
return(sigmoid(x) * (1 - sigmoid(x)))
def y_w(x,w): # Derivative of y_pred with respect to weights
return((1/(1 + np.exp(-x*w)))*(1/(1 + np.exp(-x*w))) * x)
X_train = des(x_train) # Fitting training data to design Matrix
X_test = des(x_test) # Fitting testing data to design matrix
u11 = hidden(X_train,w_input) # Calculating the output of the hidden without sigmoid
o11 = sigmoid(X_train) # Output of the sigmoid function; note: o11 = y_pred
derivative = y_w(X_test,w_input) # Derivative of y_pred with respect to w
# The sigmoid function has been used to squash the values to lie between 0 and 1,
# so the plotted outputs below can be read on a 0-to-1 scale
plt.figure(figsize=(14,6))
plt.plot(X_train, o11, 'b*')
plt.title("Output values of train data fitted to sigmoid function")
plt.xlabel('x')
plt.ylabel('y')
plt.show()
epochs = 100 # Number of iterations
lr = 0.001 # learning rate
y_pred = sigmoid(X_test) # prediction
def optimize(y_i,y_pred,w,x,epochs,lr, derivative):
cost = []
weights = []
nll = 0
n = len(x)
for i in range(epochs):
y_i = y_i.reshape(len(y_i),1) # reshaping y values to be to have n x 1 dimensions
nll = -np.sum((y_i *np.log(w) + (1 - y_i)*np.log(1 - w))) # negative Loglikelihood cost function
err = derivative * nll # Backpropagation. derivative of sigmoid is defined under functions section above
cost.append(err[-1])
w = w - (lr*(1/n)*sum(y_pred - y_i))
weights.append(w)
return(cost, weights)
error,updated_weights = optimize(y_test,y_pred,w_input,X_test,epochs,lr,der_sigmoid(X_test))
weights = updated_weights[-1]
weights # weights updated
plt.figure(figsize=(14,6))
plt.plot(error, color = 'r')
plt.title('Negative loglikelihood Cost')
plt.xlabel('Number of iterations')
plt.ylabel('Cost')
plt.show()
###Output
_____no_output_____
###Markdown
Mean life expectancy of Asian countries
###Code
Asian.lifeExp.mean()
fig=px.box(Asian, y='lifeExp')
fig.show()
###Output
_____no_output_____
###Markdown
Deviation in GDP per capita of each country in Europe and the Americas
###Code
data.std()
Gdp_dev=data.groupby(['country','continent'])['gdpPercap'].std().reset_index()
EuropeGdp=Gdp_dev[Gdp_dev['continent']=='Europe']
fig=px.line(EuropeGdp, y='gdpPercap', x='country', title="EUROPE STANDARD DEVIATION GDP PER CAP ")
fig.show()
Gdp_dev=data.groupby(['country','continent'])['gdpPercap'].std().reset_index()
AmericasGdp=Gdp_dev[Gdp_dev['continent']=='Americas']
fig=px.line(AmericasGdp, y='gdpPercap', x='country', title="Americas STANDARD DEVIATION GDP PER CAP ")
fig.show()
###Output
_____no_output_____
###Markdown
Change in population of the African countries across three decades (1987, 1997 and 2007)
###Code
data.dtypes
population=data[data['continent']=='Africa'][['year','country','pop']]
population
population=population[(population['year']==1987) | (population['year']==1997) | (population['year']==2007)]
population
fig=px.bar(population, y='pop', x='country', color='year')
fig.update_layout(
autosize=False,
width=1000,
height=800,)
fig.show()
###Output
_____no_output_____
###Markdown
Import Modules
###Code
import requests
import pandas as pd
website_url = requests.get("https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M").text
###Output
_____no_output_____
###Markdown
get website data
###Code
from bs4 import BeautifulSoup
soup = BeautifulSoup(website_url,"lxml")
###Output
_____no_output_____
###Markdown
iterate through table rows
###Code
pcs = []
prevP = ""
currN = ""
prevB = ""
for table_row in soup.select("table.wikitable tr"):
cells = table_row.findAll('td')
if len(cells) > 0:
pc = cells[0].text.strip()
b = cells[1].text.strip()
n = cells[2].text.strip()
if ((b == "Not assigned" and n=="Not assigned" or b == "blank")):
a="skipping"
else:
if (n =="Not Assigned"):
n = b; #neighbourhood becomes borough
if (pc == prevP):
currN = currN + "," + n #seperate if more than one with comma
else:
if (prevP != ""):
pcs.append([prevP,prevB,currN])
prevP = pc
prevB = b
currN = n
pcs.append([prevP,prevB,currN])
df = pd.DataFrame(pcs,columns=['Postal Code','Borough','Neighborhood']).sort_values(by=['Postal Code'])
###Output
_____no_output_____
###Markdown
show first 5 entries
###Code
df.head()
df.shape
import csv
import io
import requests
import pandas as pd
url="http://cocl.us/Geospatial_data"
s=requests.get(url).content
df2=pd.read_csv(io.StringIO(s.decode('utf-8')))
df2.head()
df3 = pd.merge(df, df2, on="Postal Code")
import folium
import numpy as np
from sklearn.cluster import KMeans
import matplotlib.cm as cm
import matplotlib.colors as colors
from geopy.geocoders import Nominatim
import datetime
from pandas.io.json import json_normalize
%matplotlib inline
data = df3[df3['Borough'].str.contains('Toronto', regex = False)].reset_index(drop=True)
data.head()
col_names = ['Postal Code','Borough','Neighborhood','Latitude','Longitude']
toronto_neigh = pd.DataFrame(columns = col_names)
#toronto_neigh
for i in range(data.shape[0]):
postcode = data.loc[i, 'Postal Code']
borough = data.loc[i, 'Borough']
lat = data.loc[i, 'Latitude'].astype(float)
lng = data.loc[i, 'Longitude'].astype(float)
neigh = data.loc[i,'Neighborhood'].split(", ")
for j in range(len(neigh)):
toronto_neigh = toronto_neigh.append(pd.DataFrame(np.array([[postcode, borough, neigh[j], lat, lng]]), columns = col_names))
toronto_neigh = toronto_neigh.reset_index(drop = True)
toronto_neigh.head()
# create folium map
toronto_map = folium.Map(location = [lat, lng], zoom_start = 11
)
for lat, lng, borough, neighborhood in zip(toronto_neigh['Latitude'], toronto_neigh['Longitude'], toronto_neigh['Borough'],toronto_neigh['Neighborhood']):
label = '{}, {} ({}, {})'.format(neighborhood, borough, lat, lng)
label = folium.Popup(label, parse_html= True)
folium.CircleMarker([float(lat),float(lng)],
radius = 3,
popup = label,
color = 'red',
fill = True,
fill_color = '#a72920',
fill_opacity = 0.5,
parse_html = False).add_to(toronto_map)
display(toronto_map)
###Output
_____no_output_____
###Markdown
api call data
###Code
now = datetime.datetime.now()
date = "%4d%02d%02d" % (now.year, now.month, now.day)
CLIENT_ID = '3ECJQTXHODVLXC0PN5LT5NM2ABWKXK4YORSKACOYAQ1RBOU1' # Foursquare ID
CLIENT_SECRET = '0XCMHV3VM5B3MDYANVU20ARUNNHL2LOPJ0DZNQYOYSJWTZ41' # Foursquare Secret
print('Your credentails:')
print('CLIENT_ID: ' + CLIENT_ID)
print('CLIENT_SECRET:' + CLIENT_SECRET)
VERSION = date
i = 0
latitude = toronto_neigh.loc[i, 'Latitude'] # neighborhood latitude value
longitude = toronto_neigh.loc[i, 'Longitude'] # neighborhood longitude value
neighborhood_name = toronto_neigh.loc[i, 'Neighborhood'] # neighborhood name
radius = 500
limit = 100
url = "https://api.foursquare.com/v2/venues/search?client_id={}&client_secret={}&ll={},{}&v={}&radius={}&limit={}".format(
CLIENT_ID,
CLIENT_SECRET,
latitude,
longitude,
VERSION,
radius,
limit)
results = requests.get(url).json()
def get_category_type(row):
try:
categories_list = row['categories']
except:
categories_list = row['venue.categories']
if len(categories_list) == 0:
return None
else:
return categories_list[0]['name']
###Output
_____no_output_____
###Markdown
call for venue data from neighborhoods
###Code
venues = results['response']['venues']
nearby_venues = json_normalize(venues)
# filter columns
filtered_columns = ['name', 'categories', 'location.lat', 'location.lng']
nearby_venues =nearby_venues.loc[:, filtered_columns]
nearby_venues['categories'] = nearby_venues.apply(get_category_type, axis=1)
nearby_venues.columns = [col.split(".")[-1] for col in nearby_venues.columns]
def getNearbyVenues(names, latitudes, longitudes, radius=500, LIMIT = 100):
venues_list=[]
for name, lat, lng in zip(names, latitudes, longitudes):
print(name)
url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format(
CLIENT_ID,
CLIENT_SECRET,
VERSION,
lat,
lng,
radius,
LIMIT)
results = requests.get(url).json()["response"]['groups'][0]['items']
# return only relevant information for each nearby venue
venues_list.append([(
name,
lat,
lng,
v['venue']['name'],
v['venue']['location']['lat'],
v['venue']['location']['lng'],
v['venue']['categories'][0]['name']) for v in results])
nearby_venues = pd.DataFrame([item for venue_list in venues_list for item in venue_list])
nearby_venues.columns = ['Neighborhood',
'Neighborhood Latitude',
'Neighborhood Longitude',
'Venue',
'Venue Latitude',
'Venue Longitude',
'Venue Category']
return(nearby_venues)
###Output
_____no_output_____
###Markdown
get venue data
###Code
toronto_venues = getNearbyVenues(names=toronto_neigh['Neighborhood'],
latitudes=toronto_neigh['Latitude'],
longitudes=toronto_neigh['Longitude']
)
toronto_onehot = pd.get_dummies(toronto_venues[['Venue Category']], prefix="", prefix_sep="")
toronto_onehot['Neighborhood'] = toronto_venues['Neighborhood']
#neighborhood column to the first column
fixed_columns = [toronto_onehot.columns[-1]] + list(toronto_onehot.columns[:-1])
toronto_onehot = toronto_onehot[fixed_columns]
toronto_grouped = toronto_onehot.groupby(['Neighborhood']).mean().reset_index()
def return_most_common_venues(row, num_top_venues):
row_categories = row.iloc[1:]
row_categories_sorted = row_categories.sort_values(ascending=False)
return row_categories_sorted.index.values[0:num_top_venues]
###Output
_____no_output_____
###Markdown
pre-process data ready for k-means clustering
###Code
num_top_venues = 5
indicators = ['st', 'nd', 'rd']
# create columns according to number of top venues
columns = []
for ind in np.arange(num_top_venues):
try:
columns.append('{}{} Most Common Venue'.format(ind+1, indicators[ind]))
except:
columns.append('{}th Most Common Venue'.format(ind+1))
# create a new dataframe
neighborhoods_venues_sorted = pd.DataFrame(columns=columns)
neighborhoods_venues_sorted['Neighborhood'] = toronto_grouped['Neighborhood']
fixed_columns = [neighborhoods_venues_sorted.columns[-1]] + list(neighborhoods_venues_sorted.columns[:-1])
neighborhoods_venues_sorted = neighborhoods_venues_sorted[fixed_columns]
for ind in np.arange(toronto_grouped.shape[0]):
neighborhoods_venues_sorted.iloc[ind, 1:] = return_most_common_venues(toronto_grouped.iloc[ind, :], num_top_venues)
# set number of clusters
kclusters = 5
toronto_grouped_clustering = toronto_grouped.drop('Neighborhood', 1)
#cluster
kmeans = KMeans(n_clusters=kclusters, random_state=0).fit(toronto_grouped_clustering)
neighborhoods_venues_sorted.insert(0, 'Cluster Labels', kmeans.labels_)
toronto_merged = toronto_neigh
# merge toronto_grouped with toronto_data to add latitude/longitude for each neighborhood
toronto_merged = toronto_merged.join(neighborhoods_venues_sorted.set_index('Neighborhood'), on='Neighborhood')
###Output
_____no_output_____
###Markdown
map data with clusters shown
###Code
map_clusters = folium.Map(location=[latitude, longitude], zoom_start=11)
x = np.arange(kclusters)
ys = [i + x + (i*x)**2 for i in range(kclusters)]
colors_array = cm.rainbow(np.linspace(0, 1, len(ys)))
rainbow = [colors.rgb2hex(i) for i in colors_array]
markers_colors = []
for lat, lon, poi, cluster in zip(toronto_merged['Latitude'], toronto_merged['Longitude'], toronto_merged['Neighborhood'], toronto_merged['Cluster Labels']):
label = folium.Popup(str(poi) + ' Cluster ' + str(cluster), parse_html=True)
folium.CircleMarker(
[float(lat), float(lon)],
radius=5,
popup=label,
color=rainbow[cluster-1],
fill=True,
fill_color=rainbow[cluster-1],
fill_opacity=0.7).add_to(map_clusters)
map_clusters
###Output
_____no_output_____
###Markdown
1. Map the mean life expectancy of all the Asian countries according to the data.
###Code
asian_life_expectancy=Asian.groupby(["country",'iso_alpha'])["lifeExp"].mean().reset_index()
asian_life_expectancy.head()
px.choropleth(asian_life_expectancy, locations="iso_alpha",color='lifeExp',
color_continuous_scale="Viridis",scope="asia",hover_name="country")
###Output
_____no_output_____
###Markdown
2. Deviation in GDP of each country in Europe and South America.
###Code
Eurosa=df[(df.continent=="Europe") |(df.continent=="South America")]
Eurosa.head()
dev=Eurosa.groupby(["country",'iso_alpha'])["gdpPercap"].std().reset_index()
dev.head()
px.choropleth(dev, locations="iso_alpha",color='gdpPercap',
color_continuous_scale="Viridis",scope="europe",hover_name="country")
###Output
_____no_output_____
###Markdown
3. The change in population of each African country in the last 3 decades.
###Code
df.year.unique()
df.continent.unique()
Africathen=df[(df.continent=="Africa")& (df.year==1977)]
Africathen.head()
Africanow=df[(df.continent=="Africa")& (df.year==2007)]
Africanow.head()
pop=Africanow[["pop","country"]]
popd=pop.rename(columns={"pop":"pop_2007"})
popd.head()
popchange=Africathen[["pop","country","iso_alpha"]]
popchange.head()
pchange=pd.merge(popd,popchange,how="inner",on="country")
pchange.head()
pchange["change"]=(pchange["pop_2007"]-pchange["pop"])*100/pchange["pop_2007"]
pchange.head()
px.choropleth(pchange, locations="iso_alpha",color='change',
color_continuous_scale="Viridis",scope="africa",hover_name="country")
###Output
_____no_output_____
###Markdown
###Code
createdFile = open("FCS.txt", 'w')
createdFile.write("ABC");
createdFile.close();
try:
file = open("FCS.txt",'r')
file.write("ABC")
except Exception as e:
print("The file gave us error - ", e)
file = open("FCS.txt",'r')
print(file.read())
finally:
file.close()
# Question 2 - unit test: check whether a given number is prime or not
%%writefile Is_PrimeNumber.py
def isPrimeNumber(numberToCheck):
    # Trial division up to sqrt(n): a prime is greater than 1 with no divisor in that range
    return numberToCheck > 1 and all(numberToCheck % d != 0 for d in range(2, int(numberToCheck ** 0.5) + 1))
%%writefile unittestof_Prime.py
import unittest
import Is_PrimeNumber
class TestPrime(unittest.TestCase):
def testprime_withPrime(self):
result= Is_PrimeNumber.isPrimeNumber(31)
self.assertEqual(result, True)
def testprime_withNonPrime(self):
result= Is_PrimeNumber.isPrimeNumber(4)
self.assertEqual(result, False)
if __name__ == '__main__':
unittest.main()
! python unittestof_Prime.py
###Output
..
----------------------------------------------------------------------
Ran 2 tests in 0.000s
OK
###Markdown
ASSIGNMENT 6 Question 1 Program to create a class named bank_account
###Code
class bank_account():
def __init__(self,ownerName,balance):
self.ownerName = ownerName
self.balance = balance
def deposit(self):
deposit_amt = input("Please enter the amount you want to DEPOSIT : ")
deposit_amt = int(deposit_amt)
self.balance= self.balance+deposit_amt
print("Amount is deposited !")
print("Your total balance is ",self.balance)
def withdraw(self):
withdrawal_amt= input("Please enter the amount you want to WITHDRAWAL : ")
withdrawal_amt = int(withdrawal_amt)
if withdrawal_amt < self.balance :
self.balance = self.balance-withdrawal_amt
print("Withdrawal Successful!")
print("Remainig Balance is ",self.balance)
else:
print("You don't have sufficent balance to withdraw.")
A001 = bank_account("John",45000)
A001.deposit()
A001.withdraw()
A001.deposit()
A001.withdraw()
###Output
Please enter the amount you want to WITHDRAWAL : 49000
Withdrawal Successful!
Remaining Balance is  50000
###Markdown
Question 2 Program to create cone
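For reference, the formulas implemented below are the base area $\pi r^2$, the lateral (conical) surface $\pi r \sqrt{r^2 + h^2}$, and the volume $\frac{1}{3}\pi r^2 h$.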
###Code
import math
class cone :
def __init__(self,radius,height):
self.radius = radius
self.height = height
def surfacearea(self):
base = (math.pi)*(math.pow(self.radius,2))
conical_surface =(math.pi)*self.radius*(math.sqrt((math.pow(self.radius,2))+(math.pow(self.height,2))))
print("Surface area of cone is-")
print("Base = ",base,"sq.units")
print("Conical Surface = ",conical_surface,"sq.units")
def volume(self):
volume =(math.pi)*(math.pow(self.radius,2))*self.height/3
print("Volume of cone = ",volume,"cubic units")
cone1 = cone(5,6)
cone1.surfacearea()
cone1.volume()
cone2 = cone(7,5)
cone2.volume()
cone2.surfacearea()
###Output
Volume of cone = 256.56340004316644 cubic units
Surface area of cone is-
Base = 153.93804002589985 sq.units
Conical Surface = 189.1750130391168 sq.units
|
notebooks/classification_notebook.ipynb | ###Markdown
Load an experimental dataset
###Code
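# Setup assumed by the cells below but not shown in this excerpt: numpy/matplotlib
# are used later and `seed` is referenced by the classifiers. The seed value here
# is an arbitrary assumption chosen for illustration.
import numpy as np
import matplotlib.pyplot as plt
seed = 42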
# Load an example dataset
from sklearn.datasets import load_breast_cancer
dataset = load_breast_cancer()
X = dataset.data
y = dataset.target
feature_names = dataset.feature_names
target_names = dataset.target_names
###Output
_____no_output_____
###Markdown
Cross-validate an example classifier
###Code
from helpers.classification.validation import cross_validate_classifier
from sklearn.linear_model import LogisticRegression
# Initialize the classifier
classifier = LogisticRegression(random_state=seed, solver="lbfgs")
# Define the classification options
threshold = 0.5
metrics = ("acc", "sen", "spe")
num_folds = 10
num_repetitions = 20
# Cross-validate the classifier
results = cross_validate_classifier(X,
y,
classifier,
threshold=threshold,
metrics=metrics,
num_folds=num_folds,
num_repetitions=num_repetitions,
seed=seed)
print("-------------------------")
print("Cross-validation results:")
print("-------------------------")
print("")
for metric in metrics:
metric_avg = float(np.mean(results[metric]))
metric_std = float(np.std(results[metric]))
print("{} = {:.2f} +- {:.2f}".format(metric, metric_avg, metric_std))
###Output
-------------------------
Cross-validation results:
-------------------------
acc = 0.94 +- 0.00
sen = 0.96 +- 0.00
spe = 0.91 +- 0.01
###Markdown
Plot the classification graphs
###Code
from helpers.classification.visualization import plot_classification
from sklearn.linear_model import LogisticRegression
# Initialize the classifier
classifier = LogisticRegression(random_state=seed, solver="lbfgs")
# Get example feature
X_dim1 = X[:, 0]
X_dim2 = X[:, 1]
X_dim1_label = feature_names[0]
X_dim2_label = feature_names[1]
# Make sure X, y are 2-dimensional
X_temp = X_dim1.reshape((len(X_dim1), 1))
y_temp = y.reshape((len(y), 1))
# Fit the classifier
classifier.fit(X_temp, y_temp)
# Evaluate the classifier
y_hat = classifier.predict(X_temp)
# Plot the classification graph
plot_classification(X_dim1,
X_dim2,
y_temp,
y_hat,
metrics=("acc", "sen", "spe"),
fig_size=(12, 5),
fig_show=False,
save_as=None,
x_label=X_dim1_label,
y_label=X_dim2_label)
plt.savefig("classification_plots.png", bbox_inches="tight")
plt.show()
###Output
_____no_output_____
###Markdown
Load an experimental dataset
###Code
# Load an example dataset
from sklearn.datasets import load_breast_cancer
dataset = load_breast_cancer()
X = dataset.data
y = dataset.target
feature_names = dataset.feature_names
target_names = dataset.target_names
###Output
_____no_output_____
###Markdown
Cross-validate an example classifier
###Code
from helpers.classification.validation import cross_validate_classifier
from sklearn.linear_model import LogisticRegression
# Initialize the classifier
classifier = LogisticRegression(random_state=seed, solver="lbfgs")
# Define the classification options
threshold = 0.5
metrics = ("acc", "sen", "spe")
num_folds = 10
num_repetitions = 20
# Cross-validate the classifier
results = cross_validate_classifier(X,
y,
classifier,
threshold=threshold,
metrics=metrics,
num_folds=num_folds,
num_repetitions=num_repetitions,
seed=seed)
print("-------------------------")
print("Cross-validation results:")
print("-------------------------")
print("")
for metric in metrics:
metric_avg = float(np.mean(results[metric]))
metric_std = float(np.std(results[metric]))
print("{} = {:.2f} +- {:.2f}".format(metric, metric_avg, metric_std))
###Output
-------------------------
Cross-validation results:
-------------------------
acc = 0.94 +- 0.02
sen = 0.96 +- 0.04
spe = 0.91 +- 0.07
###Markdown
Plot the classification graphs
###Code
from helpers.classification.visualization import plot_classification
from sklearn.linear_model import LogisticRegression
# Initialize the classifier
classifier = LogisticRegression(random_state=seed, solver="lbfgs")
# Get example feature
X_dim1 = X[:, 0]
X_dim2 = X[:, 1]
X_dim1_label = feature_names[0]
X_dim2_label = feature_names[1]
# Make sure X, y are 2-dimensional
X_temp = X_dim1.reshape((len(X_dim1), 1))
y_temp = y.reshape((len(y), 1))
# Fit the classifier
classifier.fit(X_temp, y_temp)
# Evaluate the classifier
y_hat = classifier.predict(X_temp)
# Plot the classification graph
plot_classification(X_dim1,
X_dim2,
y_temp,
y_hat,
metrics=("acc", "sen", "spe"),
fig_size=(12, 5),
fig_show=True,
x_label=X_dim1_label,
y_label=X_dim2_label)
###Output
_____no_output_____ |
module3-cross-validation/DS_223_assignment.ipynb | ###Markdown
Lambda School Data Science*Unit 2, Sprint 2, Module 3*--- Cross-Validation Assignment- [ ] [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.- [ ] Continue to participate in our Kaggle challenge. - [ ] Use scikit-learn for hyperparameter optimization with RandomizedSearchCV.- [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.)- [ ] Commit your notebook to your fork of the GitHub repo.**You can't just copy** from the lesson notebook to this assignment.- Because the lesson was **regression**, but the assignment is **classification.**- Because the lesson used [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html), which doesn't work as-is for _multi-class_ classification.So you will have to adapt the example, which is good real-world practice.1. Use a model for classification, such as [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)2. Use hyperparameters that match the classifier, such as `randomforestclassifier__ ...`3. Use a metric for classification, such as [`scoring='accuracy'`](https://scikit-learn.org/stable/modules/model_evaluation.htmlcommon-cases-predefined-values)4. If youโre doing a multi-class classification problem โ such as whether a waterpump is functional, functional needs repair, or nonfunctional โย then use a categorical encoding that works for multi-class classification, such as [OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html) (not [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html)) Stretch Goals Reading- Jake VanderPlas, [Python Data Science Handbook, Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html), Hyperparameters and Model Validation- Jake VanderPlas, [Statistics for Hackers](https://speakerdeck.com/jakevdp/statistics-for-hackers?slide=107)- Ron Zacharski, [A Programmer's Guide to Data Mining, Chapter 5](http://guidetodatamining.com/chapter5/), 10-fold cross validation- Sebastian Raschka, [A Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb)- Peter Worcester, [A Comparison of Grid Search and Randomized Search Using Scikit Learn](https://blog.usejournal.com/a-comparison-of-grid-search-and-randomized-search-using-scikit-learn-29823179bc85) Doing- Add your own stretch goals!- Try other [categorical encodings](https://contrib.scikit-learn.org/categorical-encoding/). See the previous assignment notebook for details.- In additon to `RandomizedSearchCV`, scikit-learn has [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html). Another library called scikit-optimize has [`BayesSearchCV`](https://scikit-optimize.github.io/notebooks/sklearn-gridsearchcv-replacement.html). 
Experiment with these alternatives.- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for "Grid-Searching Which Model To Use" in Chapter 6:> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...The example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines? BONUS: Stacking!Here's some code you can use to "stack" multiple submissions, which is another form of ensembling:```pythonimport pandas as pd Filenames of your submissions you want to ensemblefiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']target = 'status_group'submissions = (pd.read_csv(file)[[target]] for file in files)ensemble = pd.concat(submissions, axis='columns')majority_vote = ensemble.mode(axis='columns')[0]sample_submission = pd.read_csv('sample_submission.csv')submission = sample_submission.copy()submission[target] = majority_votesubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)```
###Code
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
import pandas as pd
import numpy as np
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv')).set_index('id')
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
###Output
_____no_output_____
###Markdown
Wrangle Data Import Data
###Code
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv', index_col='id')
train.head(3)
###Output
_____no_output_____
###Markdown
EDA
###Code
# from pandas_profiling import ProfileReport
# profile = ProfileReport(train, minimal=True).to_notebook_iframe()
# profile
train.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 59400 entries, 0 to 59399
Data columns (total 41 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 id 59400 non-null int64
1 amount_tsh 59400 non-null float64
2 date_recorded 59400 non-null object
3 funder 55765 non-null object
4 gps_height 59400 non-null int64
5 installer 55745 non-null object
6 longitude 59400 non-null float64
7 latitude 59400 non-null float64
8 wpt_name 59400 non-null object
9 num_private 59400 non-null int64
10 basin 59400 non-null object
11 subvillage 59029 non-null object
12 region 59400 non-null object
13 region_code 59400 non-null int64
14 district_code 59400 non-null int64
15 lga 59400 non-null object
16 ward 59400 non-null object
17 population 59400 non-null int64
18 public_meeting 56066 non-null object
19 recorded_by 59400 non-null object
20 scheme_management 55523 non-null object
21 scheme_name 31234 non-null object
22 permit 56344 non-null object
23 construction_year 59400 non-null int64
24 extraction_type 59400 non-null object
25 extraction_type_group 59400 non-null object
26 extraction_type_class 59400 non-null object
27 management 59400 non-null object
28 management_group 59400 non-null object
29 payment 59400 non-null object
30 payment_type 59400 non-null object
31 water_quality 59400 non-null object
32 quality_group 59400 non-null object
33 quantity 59400 non-null object
34 quantity_group 59400 non-null object
35 source 59400 non-null object
36 source_type 59400 non-null object
37 source_class 59400 non-null object
38 waterpoint_type 59400 non-null object
39 waterpoint_type_group 59400 non-null object
40 status_group 59400 non-null object
dtypes: float64(3), int64(7), object(31)
memory usage: 19.0+ MB
###Markdown
Problems we need to deal with: high cardinality, zeroes that should be NaN, NaNs, and features with constant values in columns
###Code
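# A quick, optional look at the problems listed above before wrangling
# (a minimal sketch; uses the `train` dataframe loaded earlier):
print(train.select_dtypes('object').nunique().sort_values(ascending=False).head())  # high cardinality
print(train.isnull().sum().sort_values(ascending=False).head())                     # NaNs per column
print((train[['longitude', 'latitude', 'construction_year']] == 0).sum())           # zeroes acting as NaN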
def wrangle (X):
X = X.copy()
# Latitude - replacing the funky latitude measurements with 0
X['latitude'] = X['latitude'].replace(-2e-08, 0)
# Latitude and Longitude 0's to NaN
for col in ['latitude', 'longitude']:
X[col] = X[col].replace(0, np.nan)
# Drop high cardinality columns
hc_cols = [col for col in X.describe(include='object').columns
if X[col].nunique() > 100]
X = X.drop(hc_cols, axis=1) # Actually dropping hc_cols(high cardinality)
# Drop Columns (`quantity_group` is repeated with `quantity`)
X = X.drop(['quantity_group', 'recorded_by', 'payment_type', 'num_private',
'extraction_type_group', 'extraction_type_class', 'payment',
'source'], axis=1)
return X
train = wrangle(train)
test = wrangle(test)
train.head()
train['construction_year'] = train['construction_year'].replace(0, 1999)
###Output
_____no_output_____
###Markdown
Split into target vector and feature matrix
###Code
y = train['status_group']
X = train.drop('status_group', axis=1)
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
###Output
_____no_output_____
###Markdown
Establish Baseline
###Code
print('Baseline Accuracy:', y_train.value_counts(normalize=True).max())
###Output
Baseline Accuracy: 0.5429713804713805
###Markdown
Build Model
###Code
# Import Libraries for building a model
from sklearn.pipeline import make_pipeline
from category_encoders import OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from category_encoders import OrdinalEncoder
from sklearn.ensemble import RandomForestClassifier
model = make_pipeline(
OneHotEncoder(use_cat_names=True),
SimpleImputer(),
StandardScaler(),
LogisticRegression(n_jobs=-1)
)
model.fit(X, y)
###Output
/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.
import pandas.util.testing as tm
###Markdown
Check Metrics
###Code
print('Training Accuracy:', model.score(X_train, y_train))
print('Validation Accuracy:', model.score(X_val, y_val))
###Output
Training Accuracy: 0.7286405723905723
Validation Accuracy: 0.7296296296296296
###Markdown
Tune the Model
###Code
# Import GridSearchCV (cross-validation)
from sklearn.model_selection import GridSearchCV
rfc_model = make_pipeline(
OrdinalEncoder(),
SimpleImputer(),
RandomForestClassifier(max_depth=18, n_jobs=-1, random_state=42) # <-- max_depth, n_estimators
)
# The ranges you want to test, as a dictionary
params = {'randomforestclassifier__n_estimators': range(50, 201, 50), # 4 options
'randomforestclassifier__max_depth': range(5, 26, 10)} # 3 options
# Create your gridsearch
gs = GridSearchCV(rfc_model,
param_grid=params,
n_jobs=-1,
verbose=1,
cv=5,
scoring='accuracy'
)
gs.fit(X_train, y_train);
# What are the best set of hyperparameters?
gs.best_params_
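# The assignment also mentions RandomizedSearchCV; a minimal sketch of the same search
# with random sampling (reuses `params` above; n_iter and random_state are arbitrary choices):
from sklearn.model_selection import RandomizedSearchCV
rs = RandomizedSearchCV(rfc_model,
                        param_distributions=params,
                        n_iter=5,
                        n_jobs=-1,
                        cv=5,
                        scoring='accuracy',
                        random_state=42)
# rs.fit(X_train, y_train); rs.best_params_  # run in the same way as the grid search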
# What if I want to save my best model?
best_model = gs.best_estimator_
print('Training Accuracy:', best_model.score(X_train, y_train))
print('Validation Accuracy:', best_model.score(X_val, y_val))
best_model.fit(X_train, y_train)
print('Validation Accuracy', best_model.score(X_val, y_val))
y_pred = best_model.predict(X_val)
test['prediction'] = best_model.predict(test)
submission = test.filter(['prediction'], axis=1)
submission.columns =['status_group']
submission['id'] = submission.index
submission = submission.rename_axis('index1').reset_index()
submission.drop('index1',axis=1,inplace=True)
submission = submission[['id', 'status_group']]
submission
submission.to_csv(r'drew-submission-01-rfc.csv', index=False)
from google.colab import files
files.download('drew-submission-01-rfc.csv')
###Output
_____no_output_____ |
lectures/Week5 answers.ipynb | ###Markdown
Overview We're now switching focus away from the Network Science (for a little bit), beginning to think about _Natural Language Processing_ instead. In other words, today will be all about teaching your computer to "understand" text. This ties in nicely with our work on Reddit, because submissions and comments often contain text. We've looked at the network so far - now, let's see if we can include the text. Today is about * Installing the _natural language toolkit_ (NLTK) package and learning the basics of how it works (Chapter 1)* Figuring out how to make NLTK work with other types of text (Chapter 2). > **_Video Lecture_**. [Intro to Natural Language processing](https://www.youtube.com/watch?v=Ph0EHmFT3n4). Today is all about working with NLTK, so not much lecturing - we will start with a perspective on text analysis by Sune (you will hear him talking about Wikipedia data here and there. Everything he says applies to Reddit data as well!)
###Code
from IPython.display import YouTubeVideo
YouTubeVideo("Ph0EHmFT3n4",width=800, height=450)
###Output
_____no_output_____
###Markdown
Installing and the basics> _Reading_> The reading for today is Natural Language Processing with Python (NLPP) Chapter 1, Sections 1.1, 1.2, 1.3\. [It's free online](http://www.nltk.org/book/). > *Exercises*: NLPP Chapter 1\.> > * First, install `nltk` if it isn't installed already (there are some tips below that I recommend checking out before doing installing)> * Second, work through chapter 1. The book is set up as a kind of tutorial with lots of examples for you to work through. I recommend you read the text with an open IPython Notebook and type out the examples that you see. ***It becomes much more fun if you to add a few variations and see what happens***. Some of those examples might very well be due as assignments (see below the install tips), so those ones should definitely be in a `notebook`. NLTK Install tips Check to see if `nltk` is installed on your system by typing `import nltk` in a `notebook`. If it's not already installed, install it as part of _Anaconda_ by typing conda install nltk at the command prompt. If you don't have them, you can download the various corpora using a command-line version of the downloader that runs in Python notebooks: In the iPython notebook, run the code import nltk nltk.download()Now you can hit `d` to download, then type "book" to fetch the collection needed today's `nltk` session. Now that everything is up and running, let's get to the actual exercises.
###Code
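# Non-interactive alternative to the downloader UI described above
# (a sketch; fetches the "book" collection, assuming network access is available)
import nltk
nltk.download("book", quiet=True)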
from nltk.book import *
def lexical_diversity(text):
return len(set(text)) / len(text)
def percentage(count, total):
return 100 * count / total
###Output
_____no_output_____
###Markdown
> *Exercises: NLPP Chapter 1 (the stuff that might be due in an upcoming assignment).> > The following exercises from Chapter 1 are what might be due in an assignment later on.> > * Try out the `concordance` method, using another text and a word of your own choosing.
###Code
# text6: Monty Python and the Holy Grail
text6.concordance('rabbit')
text6.concordance('holy')
###Output
Displaying 25 of 26 matches:
[ angels sing ] Arthur , this is the Holy Grail . Look well , Arthur , for it
rpose , Arthur ... the quest for the Holy Grail . [ boom ] [ singing stops ] L
he can join us in our quest for the Holy Grail . FRENCH GUARD : Well , I ' ll
gy was required if the quest for the Holy Grail were to be brought to a succes
GALAHAD : You are the keepers of the Holy Grail ? ZOOT : The what ? GALAHAD :
CELOT : No , we ' ve got to find the Holy Grail . Come on ! GALAHAD : Oh , let
uld be the sign that leads us to the Holy Grail ! Brave , brave Concorde , you
ung people in the joyful bond of the holy wedlock . Unfortunately , one of the
' ve not given up your quest for the Holy Grail ? MINSTREL : [ singing ] He is
TIM : I do . [ zoosh ] You seek the Holy Grail ! ARTHUR : That is our quest .
, we ' re -- we ' re looking for the Holy Grail . Our quest is to find the Hol
oly Grail . Our quest is to find the Holy Grail . KNIGHTS : Yeah . Yes . It is
TIM : Yes , I can help you find the Holy Grail . KNIGHTS : Oh , thank you . O
n the last resting place of the most Holy Grail . ARTHUR : Where could we find
RTHUR : No . LAUNCELOT : We have the Holy Hand Grenade . ARTHUR : Yes , of cou
ade . ARTHUR : Yes , of course ! The Holy Hand Grenade of Antioch ! ' Tis one
him ! Brother Maynard ! Bring up the Holy Hand Grenade ! MONKS : [ chanting ]
ng , ' First shalt thou take out the Holy Pin . Then , shalt thou count to thr
, be reached , then lobbest thou thy Holy Hand Grenade of Antioch towards thy
iant and pure of spirit may find the Holy Grail in the Castle of uuggggggh '.
on peril was no more . The quest for Holy Grail could continue . SCENE 23 : [
your quest ? LAUNCELOT : To seek the Holy Grail . BRIDGEKEEPER : What is your
is your quest ? ROBIN : To seek the Holy Grail . BRIDGEKEEPER : What is the c
is your quest ? ARTHUR : To seek the Holy Grail . BRIDGEKEEPER : What is the a
Thou hast vouchsafed to us the most holy -- [ twong ] [ baaaa ] Jesus Christ
###Markdown
> * Also try out the `similar` and `common_contexts` methods for a few of your own examples.
###Code
# text7: Wall Street Journal
# text4: Inaugural Address Corpus
text7.similar('opportunity')
text4.similar('opportunity')
text4.common_contexts(['country', 'war'])
###Output
the_and the_the the_has the_of a_which the_with the_for the_are of_and
the_had the_to the_be this_is
###Markdown
> * Create your own version of a dispersion plot ("your own version" means another text and different word).
###Code
text7.dispersion_plot(['buy', 'invest', 'work', 'produce', 'save'])
###Output
_____no_output_____
###Markdown
> * Explain in your own words what aspect of language _lexical diversity_ describes. - The diversity in the use of words in the text. - Calculated as the number of unique words divided by the total number of words in the text > * Create frequency distributions for `text2`, including the cumulative frequency plot for the 75 most common words.
###Code
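# Quick illustration of the lexical diversity definition above,
# using the helper defined at the top of this notebook:
print(lexical_diversity(text2))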
import matplotlib.pylab as plt
FDist = FreqDist(text2)
fig, ax = plt.subplots(figsize=(15,5))
FDist.plot(75, cumulative=True)
plt.show()
###Output
_____no_output_____
###Markdown
> * What is a bigram? How does it relate to `collocations`? Explain in your own words. - A bigram is a word pair - A collocation is a word pair that occurs together very often, where the meaning of the two words depends strongly on their pairing. So they "resist" substitution. - Collocations are word pairs that occur together frequently compared to how often they occur separately > * Work through ex 2-12 in NLPP's section 1.8\. 2. Given an alphabet of 26 letters, there are 26 to the power 10, or 26 ** 10, ten-letter strings we can form. That works out to 141167095653376. How many hundred-letter strings are possible?
###Code
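# Small illustration of the bigram answer above: adjacent word pairs from a toy sentence.
# Collocations are the bigrams that occur unusually often (cf. text4.collocations()).
import nltk
print(list(nltk.bigrams(['more', 'is', 'said', 'than', 'done'])))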
26**100
###Output
_____no_output_____
###Markdown
3. The Python multiplication operation can be applied to lists. What happens when you type \['Monty', 'Python'\] \* 20, or 3 \* sent1?
###Code
# Gets a list repeating 'Monty', 'Python' 20 times
repeat_20 = ['Monty', 'Python'] * 20
# Gets a list repeating sent 3 times
repeat_3 = 3*sent1
###Output
_____no_output_____
###Markdown
4. Review 1 on computing with language. How many words are there in text2? How many distinct words are there?
###Code
words_in_text2 = len(text2)
words_in_text2
distinct_words_in_text2 = len(set(text2))
distinct_words_in_text2
###Output
_____no_output_____
###Markdown
5. Compare the lexical diversity scores for humor and romance fiction in 1.1. Which genre is more lexically diverse? Humor has the highest lexical diversity of 0.231 compared to 0.121 in fiction: romance.This means humor has the highest proportion of unique words compared to the total number of words in the genre. 6. Produce a dispersion plot of the four main protagonists in Sense and Sensibility: Elinor, Marianne, Edward, and Willoughby. What can you observe about the different roles played by the males and females in this novel? Can you identify the couples?
###Code
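# Optional check of the figures quoted above, using the Brown corpus genres behind
# Table 1.1 (a sketch; the values should come out close to 0.231 and 0.121):
from nltk.corpus import brown
print(lexical_diversity(brown.words(categories='humor')))
print(lexical_diversity(brown.words(categories='romance')))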
text2.dispersion_plot(['Elinor', 'Marianne', 'Edward', 'Willoughby'])
###Output
_____no_output_____
###Markdown
Elinor might be the main character and Marianne could be Elinor's close friend.Then Edward and Willoughby could be flirts/boy friends.From the dispersion plot Edward seems to occur often when Elinor does, but Marianne does not. - So Edward might be Elinor's flirt/boy friend - Also Edward is frequent in the end with ElinorWilloughby occurs more often when Marianne does, so he might be her flirt/boy friend 7. Find the collocations in text5.
###Code
text5.collocations()
###Output
wanna chat; PART JOIN; MODE #14-19teens; JOIN PART; PART PART;
cute.-ass MP3; MP3 player; JOIN JOIN; times .. .; ACTION watches; guys
wanna; song lasts; last night; ACTION sits; -...)...- S.M.R.; Lime
Player; Player 12%; dont know; lez gurls; long time
###Markdown
8. Consider the following Python expression: len(set(text4)). State the purpose of this expression. Describe the two steps involved in performing this computation.* The set() function only takes one of each word, so it finds the different words in text4, but only once even if they occur more often. Then len() finds the length of the set, which gives the number of different words/tokens in text4 11. Define several variables containing lists of words, e.g., phrase1, phrase2, and so on. Join them together in various combinations (using the plus operator) to form whole sentences. What is the relationship between len(phrase1 + phrase2) and len(phrase1) + len(phrase2)?* len(phrase1 + phrase2) concatenates phrase1 and phrase2, and gives the total of number of words in phrase1 and phrase2* len(phrase1) + len(phrase2) also gives the total number of words the two phrases, but without concatenating them.it just counts the number of words in each separate sentence and then add them together 12. Consider the following two expressions, which have the same value. Which one will typically be more relevant in NLP? Why?* "Monty Python"[6:12]* ["Monty", "Python"][1]* The first will probably be more relevant in the beginning on raw text data. Here the data is often not processed, so you get everything as strings and need to extract parts of it from the raw data.Once the data is processed the second expression is probably more relevant, as we will often keep the different text representations as lists of words. - So it will depend on the work that you have to do with the text. > * Work through exercise 15, 17, 19, 22, 23, 26, 27, 28 in section 1.8\. 15. Review the discussion of conditionals in 4. Find all words in the Chat Corpus (text5) starting with the letter b. Show them in alphabetical order.
###Code
b_words = [w for w in text5 if w.startswith('b')]
b_words = set(sorted(b_words))
len(b_words)
#b_words
###Output
_____no_output_____
###Markdown
17. Use text9.index() to find the index of the word sunset. You'll need to insert this word as an argument between the parentheses. By a process of trial and error, find the slice for the complete sentence that contains this word.
###Code
sunset_index = text9.index('sunset')
buffer = 5
text9[sunset_index-buffer-3:sunset_index+buffer*3]
###Output
_____no_output_____
###Markdown
19. What is the difference between the following two lines? Which one will give a larger value? Will this be the case for other texts?* sorted(set(w.lower() for w in text1))* sorted(w.lower() for w in set(text1))* First of all the first line produces a sorted set, and the second line produces a sorted list* The first line makes all words in text1 into lower case, then takes the different words in the text and sorts them - This will mean that BIG and big are seen as the same by the set() function, so only one occurence of big at the end* The second line first takes all the different words in the text, then makes them lower case, and then sorts them - This means that BIG and big are seen as different words by the set and then made to lower afterwards. So for instance if there are occurences BIG, BiG, and big, then the final sorted list will have big in it 3 times. 22. Find all the four-letter words in the Chat Corpus (text5). With the help of a frequency distribution (FreqDist), show these words in decreasing order of frequency.
###Code
four_letter_words = [w for w in text5 if len(w) == 4]
len(four_letter_words)
Fdist_four_letter_words = FreqDist(four_letter_words)
Fdist_four_letter_words
import matplotlib.pylab as plt
fig, ax = plt.subplots(figsize=(15,5))
Fdist_four_letter_words.plot(50)
plt.show()
###Output
_____no_output_____
###Markdown
23. Review the discussion of looping with conditions in 4. Use a combination of for and if statements to loop over the words of the movie script for Monty Python and the Holy Grail (text6) and print all the uppercase words, one per line.
###Code
for w in text6:
if str.isupper(w):
print(w)
###Output
SCENE
KING
ARTHUR
SOLDIER
ARTHUR
I
SOLDIER
ARTHUR
I
I
SOLDIER
ARTHUR
SOLDIER
ARTHUR
SOLDIER
ARTHUR
SOLDIER
ARTHUR
SOLDIER
ARTHUR
SOLDIER
ARTHUR
SOLDIER
ARTHUR
SOLDIER
A
ARTHUR
SOLDIER
A
ARTHUR
SOLDIER
ARTHUR
SOLDIER
I
ARTHUR
I
SOLDIER
SOLDIER
SOLDIER
I
ARTHUR
SOLDIER
SOLDIER
SOLDIER
SOLDIER
SOLDIER
SOLDIER
SOLDIER
SOLDIER
SCENE
CART
MASTER
CUSTOMER
CART
MASTER
DEAD
PERSON
I
CART
MASTER
CUSTOMER
DEAD
PERSON
I
CART
MASTER
CUSTOMER
DEAD
PERSON
I
CART
MASTER
CUSTOMER
DEAD
PERSON
I
CUSTOMER
CART
MASTER
I
DEAD
PERSON
I
CUSTOMER
CART
MASTER
I
DEAD
PERSON
I
CUSTOMER
CART
MASTER
I
CUSTOMER
CART
MASTER
I
CUSTOMER
CART
MASTER
DEAD
PERSON
I
I
CUSTOMER
DEAD
PERSON
I
I
CUSTOMER
CART
MASTER
CUSTOMER
CART
MASTER
I
CUSTOMER
CART
MASTER
SCENE
ARTHUR
DENNIS
ARTHUR
DENNIS
I
ARTHUR
I
DENNIS
I
I
ARTHUR
I
DENNIS
ARTHUR
I
DENNIS
ARTHUR
I
DENNIS
I
ARTHUR
I
DENNIS
WOMAN
ARTHUR
I
WOMAN
ARTHUR
WOMAN
ARTHUR
I
WOMAN
I
I
DENNIS
A
WOMAN
DENNIS
ARTHUR
I
WOMAN
ARTHUR
WOMAN
ARTHUR
DENNIS
I
ARTHUR
DENNIS
ARTHUR
I
DENNIS
ARTHUR
DENNIS
ARTHUR
I
WOMAN
ARTHUR
I
WOMAN
I
ARTHUR
WOMAN
ARTHUR
I
I
DENNIS
ARTHUR
DENNIS
ARTHUR
DENNIS
I
I
I
ARTHUR
DENNIS
ARTHUR
DENNIS
I
ARTHUR
DENNIS
I
SCENE
BLACK
KNIGHT
BLACK
KNIGHT
GREEN
KNIGHT
BLACK
KNIGHT
GREEN
KNIGHT
BLACK
KNIGHT
BLACK
KNIGHT
GREEN
KNIGHT
GREEN
KNIGHT
BLACK
KNIGHT
GREEN
KNIGHT
BLACK
KNIGHT
ARTHUR
I
I
BLACK
KNIGHT
ARTHUR
BLACK
KNIGHT
ARTHUR
I
I
BLACK
KNIGHT
ARTHUR
I
BLACK
KNIGHT
I
ARTHUR
ARTHUR
BLACK
KNIGHT
ARTHUR
BLACK
KNIGHT
ARTHUR
BLACK
KNIGHT
ARTHUR
A
BLACK
KNIGHT
ARTHUR
BLACK
KNIGHT
I
ARTHUR
BLACK
KNIGHT
ARTHUR
BLACK
KNIGHT
ARTHUR
BLACK
KNIGHT
ARTHUR
BLACK
KNIGHT
ARTHUR
BLACK
KNIGHT
ARTHUR
BLACK
KNIGHT
I
ARTHUR
BLACK
KNIGHT
ARTHUR
BLACK
KNIGHT
ARTHUR
I
ARTHUR
BLACK
KNIGHT
BLACK
KNIGHT
I
ARTHUR
BLACK
KNIGHT
ARTHUR
BLACK
KNIGHT
I
ARTHUR
BLACK
KNIGHT
ARTHUR
BLACK
KNIGHT
BLACK
KNIGHT
ARTHUR
BLACK
KNIGHT
I
I
SCENE
MONKS
CROWD
A
A
A
A
MONKS
CROWD
A
A
A
A
A
A
A
A
A
A
A
A
A
VILLAGER
CROWD
BEDEVERE
VILLAGER
CROWD
BEDEVERE
WITCH
I
I
BEDEVERE
WITCH
CROWD
WITCH
BEDEVERE
VILLAGER
BEDEVERE
VILLAGER
VILLAGER
CROWD
BEDEVERE
VILLAGER
VILLAGER
VILLAGER
VILLAGER
VILLAGERS
VILLAGER
VILLAGER
VILLAGER
VILLAGER
A
VILLAGERS
A
VILLAGER
A
VILLAGER
RANDOM
BEDEVERE
VILLAGER
BEDEVERE
A
VILLAGER
I
VILLAGER
VILLAGER
CROWD
BEDEVERE
VILLAGER
VILLAGER
VILLAGER
CROWD
BEDEVERE
VILLAGER
VILLAGER
CROWD
BEDEVERE
VILLAGER
VILLAGER
VILLAGER
BEDEVERE
VILLAGER
B
BEDEVERE
CROWD
BEDEVERE
VILLAGER
BEDEVERE
VILLAGER
RANDOM
BEDEVERE
VILLAGER
VILLAGER
VILLAGER
CROWD
BEDEVERE
VILLAGER
VILLAGER
VILLAGER
VILLAGER
VILLAGER
VILLAGER
VILLAGER
VILLAGER
VILLAGER
ARTHUR
A
CROWD
BEDEVERE
VILLAGER
BEDEVERE
VILLAGER
A
VILLAGER
A
CROWD
A
A
VILLAGER
BEDEVERE
CROWD
BEDEVERE
CROWD
A
A
A
WITCH
VILLAGER
CROWD
BEDEVERE
ARTHUR
I
BEDEVERE
ARTHUR
BEDEVERE
I
ARTHUR
BEDEVERE
ARTHUR
I
NARRATOR
SCENE
SIR
BEDEVERE
ARTHUR
BEDEVERE
SIR
LAUNCELOT
ARTHUR
SIR
GALAHAD
LAUNCELOT
PATSY
ARTHUR
I
KNIGHTS
PRISONER
KNIGHTS
MAN
I
ARTHUR
KNIGHTS
SCENE
GOD
I
ARTHUR
GOD
I
I
ARTHUR
I
O
GOD
ARTHUR
GOD
ARTHUR
O
GOD
LAUNCELOT
A
A
GALAHAD
SCENE
ARTHUR
FRENCH
GUARD
ARTHUR
FRENCH
GUARD
ARTHUR
FRENCH
GUARD
I
I
ARTHUR
GALAHAD
ARTHUR
FRENCH
GUARD
I
ARTHUR
FRENCH
GUARD
ARTHUR
FRENCH
GUARD
I
I
GALAHAD
FRENCH
GUARD
ARTHUR
FRENCH
GUARD
I
GALAHAD
ARTHUR
FRENCH
GUARD
I
I
GALAHAD
FRENCH
GUARD
I
ARTHUR
I
FRENCH
GUARD
OTHER
FRENCH
GUARD
FRENCH
GUARD
ARTHUR
I
KNIGHTS
ARTHUR
KNIGHTS
FRENCH
GUARD
FRENCH
GUARD
ARTHUR
KNIGHTS
FRENCH
GUARD
FRENCH
GUARDS
LAUNCELOT
I
ARTHUR
BEDEVERE
I
FRENCH
GUARDS
C
A
ARTHUR
BEDEVERE
I
ARTHUR
BEDEVERE
U
I
ARTHUR
BEDEVERE
ARTHUR
KNIGHTS
CRASH
FRENCH
GUARDS
SCENE
VOICE
DIRECTOR
HISTORIAN
KNIGHT
KNIGHT
HISTORIAN
HISTORIAN
S
WIFE
SCENE
NARRATOR
MINSTREL
O
SIR
ROBIN
DENNIS
WOMAN
ALL
HEADS
MINSTREL
ROBIN
I
ALL
HEADS
MINSTREL
ROBIN
I
ALL
HEADS
I
ROBIN
W
I
I
ALL
HEADS
ROBIN
I
LEFT
HEAD
I
MIDDLE
HEAD
I
RIGHT
HEAD
I
MIDDLE
HEAD
I
LEFT
HEAD
I
RIGHT
HEAD
LEFT
HEAD
ROBIN
I
LEFT
HEAD
I
RIGHT
HEAD
MIDDLE
HEAD
LEFT
HEAD
RIGHT
HEAD
MIDDLE
HEAD
LEFT
HEAD
MIDDLE
HEAD
LEFT
HEAD
I
MIDDLE
HEAD
RIGHT
HEAD
LEFT
HEAD
MIDDLE
HEAD
RIGHT
HEAD
LEFT
HEAD
ALL
HEADS
MIDDLE
HEAD
RIGHT
HEAD
MINSTREL
ROBIN
MINSTREL
ROBIN
I
MINSTREL
ROBIN
MINSTREL
ROBIN
I
MINSTREL
ROBIN
I
MINSTREL
ROBIN
MINSTREL
ROBIN
I
CARTOON
MONKS
CARTOON
CHARACTER
CARTOON
MONKS
CARTOON
CHARACTERS
CARTOON
MONKS
CARTOON
CHARACTER
VOICE
CARTOON
CHARACTER
SCENE
NARRATOR
GALAHAD
GIRLS
ZOOT
GALAHAD
ZOOT
GALAHAD
ZOOT
GALAHAD
ZOOT
MIDGET
CRAPPER
O
ZOOT
MIDGET
CRAPPER
ZOOT
GALAHAD
I
I
ZOOT
GALAHAD
ZOOT
GALAHAD
ZOOT
GALAHAD
I
ZOOT
GALAHAD
I
I
ZOOT
I
GALAHAD
ZOOT
PIGLET
GALAHAD
ZOOT
GALAHAD
B
ZOOT
WINSTON
GALAHAD
PIGLET
GALAHAD
PIGLET
GALAHAD
I
PIGLET
GALAHAD
I
PIGLET
GALAHAD
I
I
I
GIRLS
GALAHAD
GIRLS
GALAHAD
DINGO
I
GALAHAD
I
DINGO
GALAHAD
I
I
DINGO
GALAHAD
DINGO
I
GALAHAD
DINGO
I
LEFT
HEAD
DENNIS
OLD
MAN
TIM
THE
ENCHANTER
ARMY
OF
KNIGHTS
DINGO
I
GOD
DINGO
GIRLS
A
A
DINGO
AMAZING
STUNNER
LOVELY
DINGO
GIRLS
A
A
DINGO
GIRLS
GALAHAD
I
LAUNCELOT
GALAHAD
LAUNCELOT
GALAHAD
LAUNCELOT
GALAHAD
LAUNCELOT
DINGO
LAUNCELOT
GALAHAD
LAUNCELOT
GALAHAD
I
LAUNCELOT
GIRLS
GALAHAD
I
DINGO
GIRLS
LAUNCELOT
GALAHAD
I
I
DINGO
GIRLS
LAUNCELOT
GALAHAD
I
DINGO
GIRLS
DINGO
LAUNCELOT
GALAHAD
I
I
LAUNCELOT
GALAHAD
LAUNCELOT
GALAHAD
I
LAUNCELOT
GALAHAD
LAUNCELOT
GALAHAD
I
LAUNCELOT
I
NARRATOR
I
I
CROWD
NARRATOR
I
SCENE
OLD
MAN
ARTHUR
OLD
MAN
ARTHUR
OLD
MAN
ARTHUR
OLD
MAN
ARTHUR
OLD
MAN
ARTHUR
OLD
MAN
ARTHUR
OLD
MAN
SCENE
HEAD
KNIGHT
OF
NI
KNIGHTS
OF
NI
ARTHUR
HEAD
KNIGHT
RANDOM
ARTHUR
HEAD
KNIGHT
BEDEVERE
HEAD
KNIGHT
RANDOM
ARTHUR
HEAD
KNIGHT
ARTHUR
HEAD
KNIGHT
KNIGHTS
OF
NI
ARTHUR
HEAD
KNIGHT
ARTHUR
HEAD
KNIGHT
ARTHUR
A
KNIGHTS
OF
NI
ARTHUR
PARTY
ARTHUR
HEAD
KNIGHT
ARTHUR
O
HEAD
KNIGHT
ARTHUR
HEAD
KNIGHT
ARTHUR
HEAD
KNIGHT
CARTOON
CHARACTER
SUN
CARTOON
CHARACTER
SUN
CARTOON
CHARACTER
SUN
CARTOON
CHARACTER
SCENE
NARRATOR
FATHER
PRINCE
HERBERT
FATHER
HERBERT
FATHER
HERBERT
B
I
FATHER
I
I
I
I
I
I
HERBERT
I
I
FATHER
HERBERT
I
FATHER
I
HERBERT
B
I
FATHER
HERBERT
FATHER
HERBERT
I
FATHER
HERBERT
I
I
I
FATHER
I
GUARD
GUARD
FATHER
I
GUARD
FATHER
GUARD
GUARD
FATHER
GUARD
FATHER
GUARD
FATHER
GUARD
GUARD
FATHER
GUARD
FATHER
GUARD
FATHER
GUARD
FATHER
GUARD
FATHER
GUARD
I
FATHER
N
GUARD
FATHER
GUARD
FATHER
GUARD
GUARD
FATHER
GUARD
FATHER
GUARD
GUARD
FATHER
GUARD
FATHER
GUARD
FATHER
GUARD
GUARD
GUARD
I
FATHER
GUARD
GUARD
FATHER
GUARD
FATHER
I
GUARD
I
HERBERT
FATHER
GUARD
FATHER
SCENE
LAUNCELOT
CONCORDE
LAUNCELOT
CONCORDE
LAUNCELOT
I
I
A
A
CONCORDE
I
I
LAUNCELOT
CONCORDE
I
I
I
I
I
LAUNCELOT
I
CONCORDE
I
I
LAUNCELOT
I
I
CONCORDE
LAUNCELOT
CONCORDE
I
LAUNCELOT
CONCORDE
I
I
I
SCENE
PRINCESS
LUCKY
GIRLS
GUEST
SENTRY
SENTRY
SENTRY
LAUNCELOT
SENTRY
LAUNCELOT
PRINCESS
LUCKY
GIRLS
LAUNCELOT
GUESTS
LAUNCELOT
GUARD
LAUNCELOT
O
I
I
HERBERT
LAUNCELOT
I
I
HERBERT
LAUNCELOT
I
HERBERT
I
I
LAUNCELOT
I
HERBERT
FATHER
HERBERT
I
FATHER
LAUNCELOT
I
HERBERT
LAUNCELOT
FATHER
LAUNCELOT
FATHER
LAUNCELOT
I
I
HERBERT
I
FATHER
LAUNCELOT
I
FATHER
I
HERBERT
FATHER
LAUNCELOT
I
FATHER
LAUNCELOT
FATHER
LAUNCELOT
I
I
I
FATHER
HERBERT
LAUNCELOT
I
FATHER
LAUNCELOT
HERBERT
I
FATHER
LAUNCELOT
HERBERT
I
LAUNCELOT
I
HERBERT
LAUNCELOT
I
I
I
FATHER
HERBERT
SCENE
GUESTS
FATHER
GUEST
FATHER
LAUNCELOT
FATHER
LAUNCELOT
I
I
I
GUEST
GUESTS
FATHER
LAUNCELOT
GUEST
GUESTS
FATHER
GUESTS
FATHER
I
I
GUEST
FATHER
GUEST
FATHER
BRIDE
S
FATHER
GUEST
FATHER
I
I
LAUNCELOT
GUEST
GUESTS
CONCORDE
HERBERT
I
FATHER
HERBERT
I
FATHER
HERBERT
I
FATHER
GUESTS
FATHER
GUESTS
FATHER
GUESTS
FATHER
GUESTS
FATHER
GUESTS
CONCORDE
GUESTS
CONCORDE
GUESTS
LAUNCELOT
GUESTS
LAUNCELOT
I
GUESTS
CONCORDE
LAUNCELOT
GUESTS
LAUNCELOT
GUESTS
LAUNCELOT
SCENE
ARTHUR
OLD
CRONE
ARTHUR
CRONE
ARTHUR
I
CRONE
ARTHUR
CRONE
ARTHUR
CRONE
BEDEVERE
ARTHUR
BEDEVERE
ARTHUR
BEDEVERE
ARTHUR
BEDEVERE
ARTHUR
BEDEVERE
ARTHUR
ARTHUR
BEDEVERE
CRONE
BEDEVERE
ARTHUR
CRONE
BEDEVERE
ARTHUR
BEDEVERE
ARTHUR
BEDEVERE
ROGER
THE
SHRUBBER
ARTHUR
ROGER
ARTHUR
ROGER
I
I
BEDEVERE
ARTHUR
SCENE
ARTHUR
O
HEAD
KNIGHT
I
ARTHUR
HEAD
KNIGHT
KNIGHTS
OF
NI
HEAD
KNIGHT
RANDOM
HEAD
KNIGHT
ARTHUR
O
HEAD
KNIGHT
ARTHUR
RANDOM
HEAD
KNIGHT
KNIGHTS
OF
NI
A
A
A
HEAD
KNIGHT
ARTHUR
HEAD
KNIGHT
ARTHUR
KNIGHTS
OF
NI
HEAD
KNIGHT
ARTHUR
HEAD
KNIGHT
I
ARTHUR
KNIGHTS
OF
NI
HEAD
KNIGHT
ARTHUR
KNIGHTS
OF
NI
HEAD
KNIGHT
KNIGHTS
OF
NI
BEDEVERE
MINSTREL
ARTHUR
ROBIN
HEAD
KNIGHT
ARTHUR
MINSTREL
ROBIN
HEAD
KNIGHT
KNIGHTS
OF
NI
ROBIN
I
KNIGHTS
OF
NI
ROBIN
ARTHUR
KNIGHTS
OF
NI
HEAD
KNIGHT
ARTHUR
KNIGHTS
OF
NI
HEAD
KNIGHT
ARTHUR
HEAD
KNIGHT
I
I
I
KNIGHTS
OF
NI
NARRATOR
KNIGHTS
NARRATOR
MINSTREL
NARRATOR
KNIGHTS
NARRATOR
A
CARTOON
CHARACTER
NARRATOR
CARTOON
CHARACTER
NARRATOR
CARTOON
CHARACTER
NARRATOR
CARTOON
CHARACTER
NARRATOR
CARTOON
CHARACTER
NARRATOR
SCENE
KNIGHTS
ARTHUR
TIM
THE
ENCHANTER
I
ARTHUR
TIM
ARTHUR
TIM
ARTHUR
TIM
I
ARTHUR
O
TIM
ROBIN
ARTHUR
KNIGHTS
ARTHUR
BEDEVERE
GALAHAD
ROBIN
BEDEVERE
ROBIN
BEDEVERE
ARTHUR
GALAHAD
ARTHUR
I
I
TIM
A
ARTHUR
A
TIM
A
ARTHUR
I
ROBIN
Y
ARTHUR
GALAHAD
KNIGHTS
TIM
ROBIN
ARTHUR
ROBIN
GALAHAD
ARTHUR
ROBIN
KNIGHTS
ARTHUR
TIM
I
KNIGHTS
TIM
ARTHUR
O
TIM
ARTHUR
SCENE
GALAHAD
ARTHUR
TIM
ARTHUR
GALAHAD
ARTHUR
W
TIM
ARTHUR
TIM
ARTHUR
TIM
ARTHUR
TIM
ARTHUR
TIM
ARTHUR
TIM
ARTHUR
TIM
ROBIN
I
I
TIM
GALAHAD
TIM
GALAHAD
ROBIN
TIM
I
ROBIN
TIM
ARTHUR
BORS
TIM
BORS
ARTHUR
TIM
I
ROBIN
I
TIM
I
I
ARTHUR
TIM
ARTHUR
TIM
KNIGHTS
KNIGHTS
ARTHUR
KNIGHTS
TIM
ARTHUR
LAUNCELOT
GALAHAD
ARTHUR
GALAHAD
ARTHUR
ROBIN
ARTHUR
GALAHAD
ARTHUR
GALAHAD
LAUNCELOT
ARTHUR
LAUNCELOT
ARTHUR
MONKS
ARTHUR
LAUNCELOT
I
ARTHUR
BROTHER
MAYNARD
SECOND
BROTHER
O
MAYNARD
SECOND
BROTHER
MAYNARD
KNIGHTS
ARTHUR
GALAHAD
ARTHUR
SCENE
ARTHUR
LAUNCELOT
GALAHAD
ARTHUR
MAYNARD
GALAHAD
LAUNCELOT
ARTHUR
MAYNARD
ARTHUR
MAYNARD
BEDEVERE
MAYNARD
LAUNCELOT
MAYNARD
ARTHUR
MAYNARD
GALAHAD
ARTHUR
MAYNARD
LAUNCELOT
ARTHUR
BEDEVERE
GALAHAD
BEDEVERE
I
LAUNCELOT
ARTHUR
LAUNCELOT
KNIGHTS
BEDEVERE
LAUNCELOT
BEDEVERE
N
LAUNCELOT
BEDEVERE
I
ARTHUR
GALAHAD
MAYNARD
BROTHER
MAYNARD
BEDEVERE
ARTHUR
KNIGHTS
BEDEVERE
KNIGHTS
NARRATOR
ANIMATOR
NARRATOR
SCENE
GALAHAD
ARTHUR
ROBIN
ARTHUR
BEDEVERE
ARTHUR
GALAHAD
ARTHUR
GALAHAD
ARTHUR
ROBIN
ARTHUR
ROBIN
I
GALAHAD
ARTHUR
ROBIN
ARTHUR
ROBIN
I
LAUNCELOT
I
I
ARTHUR
GALAHAD
ARTHUR
LAUNCELOT
I
ARTHUR
BRIDGEKEEPER
LAUNCELOT
I
BRIDGEKEEPER
LAUNCELOT
BRIDGEKEEPER
LAUNCELOT
BRIDGEKEEPER
LAUNCELOT
BRIDGEKEEPER
LAUNCELOT
ROBIN
BRIDGEKEEPER
ROBIN
I
BRIDGEKEEPER
ROBIN
BRIDGEKEEPER
ROBIN
BRIDGEKEEPER
ROBIN
I
BRIDGEKEEPER
GALAHAD
BRIDGEKEEPER
GALAHAD
I
BRIDGEKEEPER
GALAHAD
BRIDGEKEEPER
ARTHUR
BRIDGEKEEPER
ARTHUR
BRIDGEKEEPER
ARTHUR
BRIDGEKEEPER
I
I
BEDEVERE
ARTHUR
SCENE
ARTHUR
BEDEVERE
ARTHUR
BEDEVERE
ARTHUR
FRENCH
GUARD
ARTHUR
I
FRENCH
GUARD
I
I
ARTHUR
FRENCH
GUARD
I
ARTHUR
FRENCH
GUARDS
ARTHUR
FRENCH
GUARD
ARTHUR
FRENCH
GUARD
FRENCH
GUARDS
ARTHUR
BEDEVERE
ARTHUR
FRENCH
GUARDS
ARTHUR
FRENCH
GUARDS
ARTHUR
FRENCH
GUARDS
ARTHUR
ARMY
OF
KNIGHTS
HISTORIAN
S
WIFE
I
INSPECTOR
OFFICER
HISTORIAN
S
WIFE
OFFICER
INSPECTOR
OFFICER
BEDEVERE
INSPECTOR
OFFICER
INSPECTOR
OFFICER
OFFICER
RANDOM
RANDOM
OFFICER
OFFICER
OFFICER
OFFICER
INSPECTOR
OFFICER
CAMERAMAN
###Markdown
26. What does the following Python code do? sum(len(w) for w in text1) Can you use it to work out the average word length of a text?* It adds up the lengths of all words in text1. So we get the sum of all word lengths in text1; dividing this sum by len(text1) gives the average word length. 27. Define a function called vocab_size(text) that has a single parameter for the text, and which returns the vocabulary size of the text.
###Code
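# Exercise 26: dividing the summed word lengths by the number of tokens
# gives the average word length (a quick sketch):
print(sum(len(w) for w in text1) / len(text1))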
def vocab_size(text):
'''
Calculates the vocabulary size, meaning the number of different words (case insensitive) in a text
'''
return len(set(w.lower() for w in text))
vocab_size(text5)
###Output
_____no_output_____
###Markdown
28. Define a function percent(word, text) that calculates how often a given word occurs in a text, and expresses the result as a percentage.
###Code
def percent(word, text):
word_occurences = sum([w == word for w in text])
return word_occurences/len(text)
percent('rabbit', text6)
###Output
_____no_output_____
###Markdown
Working with NLTK and other types of textChapter 2 in NLPP1e is all about getting access to nicely curated texts that you can find built into NLTK. > > Reading: NLPP Chapter 2.1 - 2.4\.> > *Exercises*: NLPP Chapter 2\.> > * Solve exercise 4, 8, 11, 15, 16, 17, 18 in NLPP1e, section 2.8\. As always, I recommend you write up your solutions nicely in a `notebook`.
###Code
import nltk
nltk.corpus.gutenberg.words('austen-emma.txt')
from nltk.corpus import gutenberg
gutenberg.fileids()
###Output
_____no_output_____
###Markdown
4. Read in the texts of the State of the Union addresses, using the state_union corpus reader. Count occurrences of men, women, and people in each document. What has happened to the usage of these words over time?
###Code
from nltk.corpus import state_union
print('Women Count | Men Count | People Count | Text')
for fileid in state_union.fileids():
women_count = len([w for w in state_union.words(fileid) if w.lower() == 'women'])
men_count = len([w for w in state_union.words(fileid) if w.lower() == 'men'])
people_count = len([w for w in state_union.words(fileid) if w.lower() == 'people'])
print(f'\t{women_count} \t\t {men_count} \t\t {people_count} \t\t {fileid}')
###Output
Women Count | Men Count | People Count | Text
2 2 10 1945-Truman.txt
7 12 49 1946-Truman.txt
2 7 12 1947-Truman.txt
1 5 22 1948-Truman.txt
1 2 15 1949-Truman.txt
2 6 15 1950-Truman.txt
2 8 10 1951-Truman.txt
0 3 17 1953-Eisenhower.txt
0 2 15 1954-Eisenhower.txt
0 4 26 1955-Eisenhower.txt
2 2 30 1956-Eisenhower.txt
2 5 11 1957-Eisenhower.txt
1 2 19 1958-Eisenhower.txt
1 4 11 1959-Eisenhower.txt
0 2 10 1960-Eisenhower.txt
0 6 10 1961-Kennedy.txt
2 6 10 1962-Kennedy.txt
0 0 3 1963-Johnson.txt
5 8 12 1963-Kennedy.txt
1 3 3 1964-Johnson.txt
0 7 16 1965-Johnson-1.txt
3 12 14 1965-Johnson-2.txt
1 12 35 1966-Johnson.txt
1 11 25 1967-Johnson.txt
0 4 17 1968-Johnson.txt
2 5 6 1969-Johnson.txt
0 2 23 1970-Nixon.txt
0 1 32 1971-Nixon.txt
0 1 7 1972-Nixon.txt
0 1 9 1973-Nixon.txt
0 0 20 1974-Nixon.txt
0 0 14 1975-Ford.txt
1 3 18 1976-Ford.txt
1 2 19 1977-Ford.txt
1 0 26 1978-Carter.txt
1 0 15 1979-Carter.txt
2 1 12 1980-Carter.txt
1 1 11 1981-Reagan.txt
2 1 17 1982-Reagan.txt
7 3 19 1983-Reagan.txt
5 3 27 1984-Reagan.txt
1 1 12 1985-Reagan.txt
2 2 14 1986-Reagan.txt
0 1 24 1987-Reagan.txt
0 1 17 1988-Reagan.txt
3 2 13 1989-Bush.txt
2 3 9 1990-Bush.txt
2 2 14 1991-Bush-1.txt
7 7 13 1991-Bush-2.txt
4 4 27 1992-Bush.txt
2 1 45 1993-Clinton.txt
1 1 66 1994-Clinton.txt
3 1 73 1995-Clinton.txt
3 2 43 1996-Clinton.txt
2 1 31 1997-Clinton.txt
2 2 22 1998-Clinton.txt
3 2 22 1999-Clinton.txt
7 5 41 2000-Clinton.txt
3 3 15 2001-GWBush-1.txt
3 1 12 2001-GWBush-2.txt
6 3 14 2002-GWBush.txt
4 6 33 2003-GWBush.txt
8 7 21 2004-GWBush.txt
11 8 18 2005-GWBush.txt
7 7 22 2006-GWBush.txt
###Markdown
Women get more mentions through the years until they reach roughly the same level as men. People is mentioned quite differently depending on the sitting president. 8. Define a conditional frequency distribution over the Names corpus that allows you to see which initial letters are more frequent for males vs. females
###Code
import nltk
from nltk.corpus import names
cfd = nltk.ConditionalFreqDist(
(fileid, w[0])
for fileid in names.fileids()
for w in names.words(fileid))
import matplotlib.pylab as plt
fig, ax = plt.subplots(figsize=(15,5))
cfd.plot()
plt.show()
###Output
_____no_output_____
###Markdown
11. Investigate the table of modal distributions and look for other patterns. Try to explain them in terms of your own impressionistic understanding of the different genres. Can you find other closed classes of words that exhibit significant differences across different genres? 15. Write a program to find all words that occur at least three times in the Brown Corpus.
###Code
import nltk
from nltk.corpus import brown
fd = nltk.FreqDist(brown.words())
plus_three_occurrences = [w for w, count in fd.items() if count >= 3]
len(plus_three_occurrences)
###Output
_____no_output_____
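###Markdown
Returning to exercise 11 above, a possible starting point (a sketch, not the original author's solution; the choice of modal verbs here is an assumption) is to tabulate a handful of modals across the Brown genres with a conditional frequency distribution and compare the rows:
###Code
import nltk
from nltk.corpus import brown
# Count each modal verb in each Brown genre and print a genre-by-modal table.
modals = ['can', 'could', 'may', 'might', 'must', 'will']
cfd = nltk.ConditionalFreqDist(
    (genre, word.lower())
    for genre in brown.categories()
    for word in brown.words(categories=genre))
cfd.tabulate(conditions=brown.categories(), samples=modals)
###Output
_____no_output_____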
###Markdown
16. Write a program to generate a table of lexical diversity scores (i.e. token/type ratios), as we saw in 1.1. Include the full set of Brown Corpus genres (nltk.corpus.brown.categories()). Which genre has the lowest diversity (greatest number of tokens per type)? Is this what you would have expected?
###Code
import nltk
from nltk.corpus import brown
lexical_diversity_table = {
    genre: lexical_diversity(brown.words(categories=genre))
    for genre in brown.categories()
}
lexical_diversity_table
###Output
_____no_output_____
###Markdown
17. Write a function that finds the 50 most frequently occurring words of a text that are not stopwords.
###Code
import nltk
from nltk.corpus import stopwords
stp_words = stopwords.words('english')+['.', ',', ';', '-', "'", '"', '--', '?', '!', '."', '?"', '(', ')', ',"', '!"']
def top_non_stopwords(text, top=50):
    fd = nltk.FreqDist(
        w for w in text if w.lower() not in stp_words)
    # most_common() guarantees descending-frequency order
    return [w for w, _ in fd.most_common(top)]
top_non_stopwords(text1)
###Output
_____no_output_____
###Markdown
18. Write a program to print the 50 most frequent bigrams (pairs of adjacent words) of a text, omitting bigrams that contain stopwords.
###Code
import nltk
from nltk.corpus import stopwords
stp_words = stopwords.words('english')+['.', ',', ';', '-', "'", '"', '--', '?', '!', '."', '?"', '(', ')', ',"', '!"']
def top_non_stopword_bigrams(text, top=50):
    fd = nltk.FreqDist(
        b for b in nltk.bigrams(text) if b[0].lower() not in stp_words
        and b[1].lower() not in stp_words)
    # most_common() guarantees descending-frequency order
    return [b for b, _ in fd.most_common(top)]
top_non_stopword_bigrams(text1)
###Output
_____no_output_____ |
Regression/Decision_Tree_Regressor.ipynb | ###Markdown
Decision Tree Regression Importing the libraries
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
###Output
_____no_output_____
###Markdown
Importing the dataset
###Code
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:-1].values
y = dataset.iloc[:, -1].values
###Output
_____no_output_____
###Markdown
Training the Decision Tree Regression model on the whole dataset
###Code
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state = 0)
regressor.fit(X, y)
###Output
_____no_output_____
###Markdown
Predicting a new result
###Code
regressor.predict([[6.5]])
###Output
_____no_output_____
###Markdown
Visualising the Decision Tree Regression results (higher resolution)
###Code
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.title('Truth or Bluff (Decision Tree Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
###Output
_____no_output_____
###Markdown
Visualising the Decision Tree Regression results (Lower resolution)
###Code
plt.scatter(X, y, color = 'red')
plt.plot(X, regressor.predict(X), color = 'blue')
plt.title('Truth or Bluff (Decision Tree Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
###Output
_____no_output_____ |
samples/04_gis_analysts_data_scientists/calculating_nXn_od_cost_matrix.ipynb | ###Markdown
Calculating Origin Destinations nXn Matrix given a set of origins and destinations. Table of Contents: Origin Destinations nXn Matrix given a set of origins and destinations; Create origins layer; Create destinations layer; Convert to matrix format; Conclusion. The [Origin Destination (OD) Cost Matrix service](http://desktop.arcgis.com/en/arcmap/latest/extensions/network-analyst/od-cost-matrix.htm) helps you to create an OD cost matrix for multiple `origins` to multiple `destinations`. An OD cost matrix is a table that contains cost, such as travel time or travel distance, from each origin to each destination. Additionally, it ranks the destinations in ascending order based on the minimum cost required to travel. When generating an OD cost matrix, you can optionally specify the maximum number of destinations to find for each origin and the maximum time or distance to travel when searching for destinations. By default, the matrix is generated with the columns origin id, destination id, destination rank, total time and total distance. In this sample notebook, we will use this tool to get an OD matrix given a set of origin and destination points, supplied either as a csv with latitude and longitude or as a csv file with a list of addresses. In a later part of this sample, we will format the table to get an n by n matrix. This is useful when you want to solve other transportation problems with open source tools or heuristics. When it comes to real-world TSP (Travelling Salesman Problem), VRP (Vehicle Routing Problem) or other transportation problems, data about travel time from every point to every other point can give you more realistic results than euclidean distance. **Note**: If you run the tutorial using ArcGIS Online, 0.003 [credit](https://www.esri.com/en-us/arcgis/products/arcgis-online/pricing/credits) will be consumed as there are 6 origin-destination pairs. As a first step, let's import the required libraries and establish a connection to your organization, which could be an ArcGIS Online organization or an ArcGIS Enterprise. If you don't have an ArcGIS account, [get an ArcGIS Trial](https://www.esri.com/en-us/arcgis/trial).
###Code
import arcgis
from arcgis.gis import GIS
import pandas as pd
import datetime
import getpass
from IPython.display import HTML
from arcgis import geocoding
from arcgis.features import Feature, FeatureSet
from arcgis.features import GeoAccessor, GeoSeriesAccessor
portal_url = 'https://www.arcgis.com'
#connect to your GIS
user_name = '<user_name>'
password = '<password>'
my_gis = GIS(portal_url, user_name, password)
###Output
_____no_output_____
###Markdown
We will see how to create layer for origins and destinations when we have latitude and longitude and when we have addresses to geocode for converting to layer respectively. Create origins layer: We have latitude and longitude information for origins, with the following code snippet, we can create a layer from the information. We will reverse geocode the latitude longitude information to find the locations.**Note**: Geocoding the addresses will consume [credits](https://www.esri.com/en-us/arcgis/products/arcgis-online/pricing/credits).
###Code
origin_coords = ['-117.187807, 33.939479', '-117.117401, 34.029346']
origin_features = []
for origin in origin_coords:
reverse_geocode = geocoding.reverse_geocode({"x": origin.split(',')[0],
"y": origin.split(',')[1]})
origin_feature = Feature(geometry=reverse_geocode['location'],
attributes=reverse_geocode['address'])
origin_features.append(origin_feature)
origin_fset = FeatureSet(origin_features, geometry_type='esriGeometryPoint',
spatial_reference={'latestWkid': 4326})
origin_fset
###Output
_____no_output_____
###Markdown
Create destinations layer: We have address information for the destinations in a csv file, with the following code snippet, we can geocode the addresses to create a destination layer. You could [batch geocode](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.geocoding.htmlbatch-geocode) the addresses if you have a list of addresses. **Note**: Geocoding the addresses will consume credits.
###Code
# Read csv files from data:
destinations_address = r"data/destinations_address.csv"
destinations_df = pd.read_csv(destinations_address)
destinations_sdf = pd.DataFrame.spatial.from_df(destinations_df, "Address")
destinations_sdf.head()
destinations_fset = destinations_sdf.spatial.to_featureset()
destinations_fset
###Output
_____no_output_____
###Markdown
With these inputs, solve the problem with Origin Destintion matrix solver. Look up [the doc](https://developers.arcgis.com/rest/network/api-reference/origin-destination-cost-matrix-service.htm) to understand how this tool works and its parameters. Remember, `0.0005` credits per input origin and destination pair will be charged. For example, if there are `100` origins and `200` destinations, the cost will be `10` credits. If you specify a cutoff or limit the number of destinations, for instance, to find only `5` closest destinations within `10` minutes of every origin, the cost will still be `10` credits, as the credits depend on the number of input origin destination pairs. `TargetDestinationCount`- The maximum number of destinations that must be found for the origin. If a value is not specified, the value from the Number of Destinations to Find parameter is used. `Cutoff`- Specify the travel time or travel distance value at which to stop searching for destinations from the origin. Any destination beyond the cutoff value will not be considered. The value needs to be in the units specified by the Time Units parameter if the impedance attribute in your travel mode is time based or in the units specified by the Distance Units parameter if the impedance attribute in your travel mode is distance based. If a value is not specified, the tool will not enforce any travel time or travel distance limit when searching for destinations.Specify `origin_destination_line_shape` to see the output in map. Even though the lines are straight for performance reasons, they always store the travel time and travel distance along the street network, not straight-line distance.
###Code
%%time
# solve OD cost matrix tool for the origns and destinations
from arcgis.network.analysis import generate_origin_destination_cost_matrix
results = generate_origin_destination_cost_matrix(origins= origin_fset, #origins_fc_latlong,
destinations= destinations_fset, #destinations_fs_address,
cutoff=200,
origin_destination_line_shape='Straight Line')
print('Analysis succeeded? {}'.format(results.solve_succeeded))
###Output
Analysis succeeded? True
CPU times: user 379 ms, sys: 27.1 ms, total: 406 ms
Wall time: 24.7 s
###Markdown
Let's see the output lines table.
###Code
od_df = results.output_origin_destination_lines.sdf
od_df
###Output
_____no_output_____
###Markdown
Convert to matrix formatWe need to change the format to get a matrix with rows as origins and columns as destinations, with impedance value as travel time or travel distance. We will use the `pivot_table` feature of Pandas to accomplish that.
###Code
# filter only the required columns
od_df2 = od_df[['DestinationOID','OriginOID','Total_Distance','Total_Time']]
# user pivot_table
od_pivot = od_df2.pivot_table(index='OriginOID', columns='DestinationOID')
od_pivot
###Output
_____no_output_____
###Markdown
Write the pivot table to disk
###Code
od_pivot.to_csv('data/OD_Matrix.csv')
###Output
_____no_output_____
###Markdown
This is how we can get OD cost matrix when we have csv files with origin and destinations location information. We could read this matrix and provide this as input to a heuristics or an open-source algorithm.
###Code
od_map = my_gis.map('Loma Linda, CA')
od_map
od_map.draw(results.output_origin_destination_lines)
od_map.draw(destinations_fset, symbol={"type": "esriSMS","style": "esriSMSSquare","color": [255,115,0,255], "size": 10})
od_map.draw(origin_fset, symbol={"type": "esriSMS","style": "esriSMSCircle","color": [76,115,0,255],"size": 8})
###Output
_____no_output_____
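###Markdown
Following up on the note above about feeding the matrix into a heuristic or open-source solver, here is a minimal sketch (not part of the original sample): it reads the saved `data/OD_Matrix.csv` back in and simply orders the destinations for each origin by travel time. The `header` and `index_col` arguments assume the two-level column layout written by `pivot_table` above.
###Code
import pandas as pd
od = pd.read_csv('data/OD_Matrix.csv', header=[0, 1], index_col=0)
times = od['Total_Time']  # rows: OriginOID, columns: DestinationOID
for origin_id, row in times.iterrows():
    # visit destinations in ascending order of travel time from this origin
    print(origin_id, list(row.sort_values().index))
###Output
_____no_output_____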
###Markdown
Calculating Origin Destinations nXn Matrix given set of origins and destinations Table of ContentsOrigin Destinations nXn Matrix given set of origins and destinationsCreate origins layer:Create destinations layer:Convert to matrix formatConclusion The [Origin Destination(OD) Cost Matrix service](http://desktop.arcgis.com/en/arcmap/latest/extensions/network-analyst/od-cost-matrix.htm) helps you to create an OD cost matrix for multiple `origins` to multiple `destinations`. An OD cost matrix is a table that contains cost, such as travel time or travel distance, from each origin to each destination. Additionally, it ranks the destinations in ascending order based on the minimum cost required to travel. When generating an OD cost matrix, you can optionally specify the maximum number of destinations to find for each origin and the maximum time or distance to travel when searching for destinations.By default, the matrix is generated with columns - origin id, destination id, destination rank, total time and total distance. In this sample notebook , we will use this tool to get OD matrix if given a set of origin and destination points, either as a csv with latitude and longitude or csv file with list of addresses. In later part of this sample, we will format the table to get n by n matrix.This is useful when you want to solve other transportation problems with open source tools or heuristics. When it comes to real world TSP(Travelling Salesman Problem) or VRP(Vehicle Routing Problem) or other tranportation problems, data about travel time from every point to every other point can give you more realistic results than with euclidean distance. **Note** :If you run the tutorial using ArcGIS Online, 0.003 [credit](https://www.esri.com/en-us/arcgis/products/arcgis-online/pricing/credits) will be consumed as there are 6 origin-destination pairs.As a first step, let's import required libraries and establish a connection to your organization which could be an ArcGIS Online organization or an ArcGIS Enterprise. If you dont have an ArcGIS account, [get ArcGIS Trial](https://www.esri.com/en-us/arcgis/trial).
###Code
import arcgis
from arcgis.gis import GIS
import pandas as pd
import datetime
import getpass
from IPython.display import HTML
from arcgis import geocoding
from arcgis.features import Feature, FeatureSet
from arcgis.features import GeoAccessor, GeoSeriesAccessor
my_gis = GIS('home')
###Output
_____no_output_____
###Markdown
We will see how to create layer for origins and destinations when we have latitude and longitude and when we have addresses to geocode for converting to layer respectively. Create origins layer: We have latitude and longitude information for origins, with the following code snippet, we can create a layer from the information. We will reverse geocode the latitude longitude information to find the locations.**Note**: Geocoding the addresses will consume [credits](https://www.esri.com/en-us/arcgis/products/arcgis-online/pricing/credits).
###Code
origin_coords = ['-117.187807, 33.939479', '-117.117401, 34.029346']
origin_features = []
for origin in origin_coords:
reverse_geocode = geocoding.reverse_geocode({"x": origin.split(',')[0],
"y": origin.split(',')[1]})
origin_feature = Feature(geometry=reverse_geocode['location'],
attributes=reverse_geocode['address'])
origin_features.append(origin_feature)
origin_fset = FeatureSet(origin_features, geometry_type='esriGeometryPoint',
spatial_reference={'latestWkid': 4326})
origin_fset
###Output
_____no_output_____
###Markdown
Create destinations layer: We have address information for the destinations in a csv file, with the following code snippet, we can geocode the addresses to create a destination layer. You could [batch geocode](https://esri.github.io/arcgis-python-api/apidoc/html/arcgis.geocoding.htmlbatch-geocode) the addresses if you have a list of addresses. **Note**: Geocoding the addresses will consume credits.
###Code
# Read csv files from data:
destinations_address = r"data/destinations_address.csv"
destinations_df = pd.read_csv(destinations_address)
destinations_sdf = pd.DataFrame.spatial.from_df(destinations_df, "Address")
destinations_sdf.head()
destinations_fset = destinations_sdf.spatial.to_featureset()
destinations_fset
###Output
_____no_output_____
###Markdown
With these inputs, solve the problem with Origin Destintion matrix solver. Look up [the doc](https://developers.arcgis.com/rest/network/api-reference/origin-destination-cost-matrix-service.htm) to understand how this tool works and its parameters. Remember, `0.0005` credits per input origin and destination pair will be charged. For example, if there are `100` origins and `200` destinations, the cost will be `10` credits. If you specify a cutoff or limit the number of destinations, for instance, to find only `5` closest destinations within `10` minutes of every origin, the cost will still be `10` credits, as the credits depend on the number of input origin destination pairs. `TargetDestinationCount`- The maximum number of destinations that must be found for the origin. If a value is not specified, the value from the Number of Destinations to Find parameter is used. `Cutoff`- Specify the travel time or travel distance value at which to stop searching for destinations from the origin. Any destination beyond the cutoff value will not be considered. The value needs to be in the units specified by the Time Units parameter if the impedance attribute in your travel mode is time based or in the units specified by the Distance Units parameter if the impedance attribute in your travel mode is distance based. If a value is not specified, the tool will not enforce any travel time or travel distance limit when searching for destinations.Specify `origin_destination_line_shape` to see the output in map. Even though the lines are straight for performance reasons, they always store the travel time and travel distance along the street network, not straight-line distance.
###Code
%%time
# solve OD cost matrix tool for the origns and destinations
from arcgis.network.analysis import generate_origin_destination_cost_matrix
results = generate_origin_destination_cost_matrix(origins= origin_fset, #origins_fc_latlong,
destinations= destinations_fset, #destinations_fs_address,
cutoff=200,
origin_destination_line_shape='Straight Line')
print('Analysis succeeded? {}'.format(results.solve_succeeded))
###Output
Analysis succeeded? True
CPU times: user 379 ms, sys: 27.1 ms, total: 406 ms
Wall time: 24.7 s
###Markdown
Let's see the output lines table.
###Code
od_df = results.output_origin_destination_lines.sdf
od_df
###Output
_____no_output_____
###Markdown
Convert to matrix formatWe need to change the format to get a matrix with rows as origins and columns as destinations, with impedance value as travel time or travel distance. We will use the `pivot_table` feature of Pandas to accomplish that.
###Code
# filter only the required columns
od_df2 = od_df[['DestinationOID','OriginOID','Total_Distance','Total_Time']]
# user pivot_table
od_pivot = od_df2.pivot_table(index='OriginOID', columns='DestinationOID')
od_pivot
###Output
_____no_output_____
###Markdown
Write the pivot table to disk
###Code
od_pivot.to_csv('data/OD_Matrix.csv')
###Output
_____no_output_____
###Markdown
This is how we can get OD cost matrix when we have csv files with origin and destinations location information. We could read this matrix and provide this as input to a heuristics or an open-source algorithm.
###Code
od_map = my_gis.map('Loma Linda, CA')
od_map
od_map.draw(results.output_origin_destination_lines)
od_map.draw(destinations_fset, symbol={"type": "esriSMS","style": "esriSMSSquare","color": [255,115,0,255], "size": 10})
od_map.draw(origin_fset, symbol={"type": "esriSMS","style": "esriSMSCircle","color": [76,115,0,255],"size": 8})
###Output
_____no_output_____
###Markdown
Calculating Origin Destinations nXn Matrix given set of origins and destinations Table of ContentsOrigin Destinations nXn Matrix given set of origins and destinationsCreate origins layer:Get destinations layer:Convert to matrix formatConclusion The [Origin Destination(OD) Cost Matrix service](http://desktop.arcgis.com/en/arcmap/latest/extensions/network-analyst/od-cost-matrix.htm) helps you to create an OD cost matrix for multiple `origins` to multiple `destinations`. An OD cost matrix is a table that contains cost, such as travel time or travel distance, from each origin to each destination. Additionally, it ranks the destinations in ascending order based on the minimum cost required to travel. When generating an OD cost matrix, you can optionally specify the maximum number of destinations to find for each origin and the maximum time or distance to travel when searching for destinations.By default, the matrix is generated with columns - origin id, destination id, destination rank, total time and total distance. In this sample notebook , we will use this tool to get OD matrix if given a set of origin and destination points, either as a csv with latitude and longitude or csv file with list of addresses. In later part of this sample, we will format the table to get n by n matrix.This is useful when you want to solve other transportation problems with open source tools or heuristics. When it comes to real world TSP(Travelling Salesman Problem) or VRP(Vehicle Routing Problem) or other tranportation problems, data about travel time from every point to every other point can give you more realistic results than with euclidean distance. **Note** :If you run the tutorial using ArcGIS Online, 0.003 [credit](https://www.esri.com/en-us/arcgis/products/arcgis-online/pricing/credits) will be consumed as there are 6 origin-destination pairs.As a first step, let's import required libraries and establish a connection to your organization which could be an ArcGIS Online organization or an ArcGIS Enterprise. If you dont have an ArcGIS account, [get ArcGIS Trial](https://www.esri.com/en-us/arcgis/trial).
###Code
import arcgis
from arcgis.gis import GIS
import pandas as pd
import datetime
import getpass
from IPython.display import HTML
from arcgis import geocoding
from arcgis.features import Feature, FeatureSet
from arcgis.features import GeoAccessor, GeoSeriesAccessor
my_gis = GIS('home')
###Output
_____no_output_____
###Markdown
We will see how to create layer for origins and destinations when we have latitude and longitude and when we have addresses to geocode for converting to layer respectively. Create origins layer: We have latitude and longitude information for origins, with the following code snippet, we can create a layer from the information. We will reverse geocode the latitude longitude information to find the locations.**Note**: Geocoding the addresses will consume [credits](https://www.esri.com/en-us/arcgis/products/arcgis-online/pricing/credits).
###Code
origin_coords = ['-117.187807, 33.939479', '-117.117401, 34.029346']
origin_features = []
for origin in origin_coords:
reverse_geocode = geocoding.reverse_geocode({"x": origin.split(',')[0],
"y": origin.split(',')[1]})
origin_feature = Feature(geometry=reverse_geocode['location'],
attributes=reverse_geocode['address'])
origin_features.append(origin_feature)
origin_fset = FeatureSet(origin_features, geometry_type='esriGeometryPoint',
spatial_reference={'latestWkid': 4326})
origin_fset
###Output
_____no_output_____
###Markdown
Get destinations layer:
###Code
addresses_item = my_gis.content.search('destinations_address', 'feature layer')[0]
addresses_item
destinations_sdf = addresses_item.layers[0].query(as_df=True)
destinations_sdf
destinations_fset = destinations_sdf.spatial.to_featureset()
destinations_fset
###Output
_____no_output_____
###Markdown
With these inputs, solve the problem with Origin Destintion matrix solver. Look up [the doc](https://developers.arcgis.com/rest/network/api-reference/origin-destination-cost-matrix-service.htm) to understand how this tool works and its parameters. Remember, `0.0005` credits per input origin and destination pair will be charged. For example, if there are `100` origins and `200` destinations, the cost will be `10` credits. If you specify a cutoff or limit the number of destinations, for instance, to find only `5` closest destinations within `10` minutes of every origin, the cost will still be `10` credits, as the credits depend on the number of input origin destination pairs. `TargetDestinationCount`- The maximum number of destinations that must be found for the origin. If a value is not specified, the value from the Number of Destinations to Find parameter is used. `Cutoff`- Specify the travel time or travel distance value at which to stop searching for destinations from the origin. Any destination beyond the cutoff value will not be considered. The value needs to be in the units specified by the Time Units parameter if the impedance attribute in your travel mode is time based or in the units specified by the Distance Units parameter if the impedance attribute in your travel mode is distance based. If a value is not specified, the tool will not enforce any travel time or travel distance limit when searching for destinations.Specify `origin_destination_line_shape` to see the output in map. Even though the lines are straight for performance reasons, they always store the travel time and travel distance along the street network, not straight-line distance.
###Code
%%time
# solve OD cost matrix tool for the origns and destinations
from arcgis.network.analysis import generate_origin_destination_cost_matrix
results = generate_origin_destination_cost_matrix(origins= origin_fset, #origins_fc_latlong,
destinations= destinations_fset, #destinations_fs_address,
cutoff=200,
origin_destination_line_shape='Straight Line')
print('Analysis succeeded? {}'.format(results.solve_succeeded))
###Output
Analysis succeeded? True
CPU times: user 379 ms, sys: 27.1 ms, total: 406 ms
Wall time: 24.7 s
###Markdown
Let's see the output lines table.
###Code
od_df = results.output_origin_destination_lines.sdf
od_df
###Output
_____no_output_____
###Markdown
Convert to matrix formatWe need to change the format to get a matrix with rows as origins and columns as destinations, with impedance value as travel time or travel distance. We will use the `pivot_table` feature of Pandas to accomplish that.
###Code
# filter only the required columns
od_df2 = od_df[['DestinationOID','OriginOID','Total_Distance','Total_Time']]
# user pivot_table
od_pivot = od_df2.pivot_table(index='OriginOID', columns='DestinationOID')
od_pivot
###Output
_____no_output_____
###Markdown
Write the pivot table to disk
###Code
od_pivot.to_csv('data/OD_Matrix.csv')
###Output
_____no_output_____
###Markdown
This is how we can get OD cost matrix when we have csv files with origin and destinations location information. We could read this matrix and provide this as input to a heuristics or an open-source algorithm.
###Code
od_map = my_gis.map('Loma Linda, CA')
od_map
od_map.draw(results.output_origin_destination_lines)
od_map.draw(destinations_fset, symbol={"type": "esriSMS","style": "esriSMSSquare","color": [255,115,0,255], "size": 10})
od_map.draw(origin_fset, symbol={"type": "esriSMS","style": "esriSMSCircle","color": [76,115,0,255],"size": 8})
###Output
_____no_output_____ |
Cognitive_Systems-Mathematics_and_Methods/week02/Assignment_2.ipynb | ###Markdown
Visualizing the data
###Code
drone_delivery_df.plot.scatter('x', 'y', s=2).set_title(
'Drone delivery destinations');
###Output
_____no_output_____
###Markdown
Looks like a forest after an emergency landing... :)
###Code
from sklearn import cluster
from ipywidgets import interact, fixed, widgets, interactive
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Attaching closest depot label to the destinations (for 3 clusters by k-means)
###Code
clustered_3means = cluster.KMeans(3).fit(drone_delivery_df)
drone_delivery_df_cluster_info = drone_delivery_df.copy()
drone_delivery_df_cluster_info['cluster'] = clustered_3means.labels_
drone_delivery_df_cluster_info.head(10)
def plot_clusters(df, clusters, title=None):
plt.scatter(df.x, df.y, s=2, c=clusters.labels_, cmap='tab20')
if hasattr(clusters, "cluster_centers_"):
plt.scatter(
clusters.cluster_centers_[:,0],
clusters.cluster_centers_[:,1],
s=80, c='red', alpha=0.5
)
if title: plt.title(title)
plot_clusters(drone_delivery_df, clustered_3means, 'Near-optimal depots for 3 clusters')
###Output
_____no_output_____
###Markdown
Playing a bit more with the number of depots
###Code
def compute_and_show_kmeans(n_clusters, df):
kmeans = cluster.KMeans(n_clusters).fit(df)
plot_clusters(df, kmeans)
interact(compute_and_show_kmeans, n_clusters=(1, 20), df=fixed(drone_delivery_df));
###Output
_____no_output_____
###Markdown
The computational time is quite noticeable with a higher number of clusters; specifically:
###Code
for i in range(3, 24, 2):
print(f'Time taken for {i} clusters:')
%timeit -r 2 -n 5 cluster.KMeans(i).fit(drone_delivery_df)
print()
###Output
Time taken for 3 clusters:
83.6 ms ± 6.52 ms per loop (mean ± std. dev. of 2 runs, 5 loops each)
Time taken for 5 clusters:
103 ms ± 1.86 ms per loop (mean ± std. dev. of 2 runs, 5 loops each)
Time taken for 7 clusters:
160 ms ± 5.06 ms per loop (mean ± std. dev. of 2 runs, 5 loops each)
Time taken for 9 clusters:
264 ms ± 4.65 ms per loop (mean ± std. dev. of 2 runs, 5 loops each)
Time taken for 11 clusters:
283 ms ± 7.42 ms per loop (mean ± std. dev. of 2 runs, 5 loops each)
Time taken for 13 clusters:
327 ms ± 47.1 ms per loop (mean ± std. dev. of 2 runs, 5 loops each)
Time taken for 15 clusters:
362 ms ± 19.5 ms per loop (mean ± std. dev. of 2 runs, 5 loops each)
Time taken for 17 clusters:
394 ms ± 13.9 ms per loop (mean ± std. dev. of 2 runs, 5 loops each)
Time taken for 19 clusters:
482 ms ± 22 ms per loop (mean ± std. dev. of 2 runs, 5 loops each)
Time taken for 21 clusters:
430 ms ± 11 ms per loop (mean ± std. dev. of 2 runs, 5 loops each)
Time taken for 23 clusters:
440 ms ± 19.8 ms per loop (mean ± std. dev. of 2 runs, 5 loops each)
###Markdown
I quite enjoyed exploring the behaviour of other algorithms, see below...
###Code
# preparation for interactive input
n_clusters_widget = widgets.IntSlider(
description='Clusters',
value=10, min=1, max=20, step=1,
continuous_update=False,
)
options = {
"Agglomeration": (
cluster.AgglomerativeClustering,
dict(
n_clusters=n_clusters_widget,
affinity=widgets.ToggleButtons(
description='Affinity',
options=['euclidean', 'l1', 'l2', 'manhattan', 'cosine', 'precomputed']),
linkage=widgets.ToggleButtons(
description='Linkage',
options=['ward', 'complete', 'average', 'single']),
)
),
"Birch": (
cluster.Birch,
dict(
n_clusters=n_clusters_widget,
threshold=widgets.FloatSlider(
description='Threshold',
min=0, max=400, value=0.5, step=0.1,
continuous_update=False),
)
),
"K-Means": (
cluster.KMeans,
dict(n_clusters=n_clusters_widget)
),
}
algo_choice = widgets.ToggleButtons(options=options.keys(), description='Clustering')
output = widgets.Output()
def get_clusterer_and_widgets():
algo_key = algo_choice.value
return options[algo_key]
with output:
previous_settings = None
def redraw_output(clear=True):
if clear:
output.clear_output()
display(algo_choice)
_, widgets = get_clusterer_and_widgets()
for widget in widgets.values():
display(widget)
def compute_and_show():
global previous_settings
clusterer, widgets = get_clusterer_and_widgets()
kwargs = {key:widget.value for key, widget in widgets.items()}
    # observe() without names='value' fires once per changed trait
    # (value, index, label, ...), so one user action can trigger several
    # notifications; comparing the settings filters out the duplicates
current_settings = (clusterer, kwargs)
if current_settings != previous_settings:
previous_settings = current_settings
clusters = clusterer(**kwargs).fit(drone_delivery_df)
redraw_output()
plot_clusters(drone_delivery_df, clusters)
def observe_widgets():
handler = lambda _: compute_and_show()
algo_choice.observe(handler)
unique_widgets = {
widget
for r, widget_dict in options.values()
for widget in widget_dict.values()
}
for widget in unique_widgets:
widget.observe(handler)
observe_widgets()
redraw_output(clear=False)
display(output)
###Output
_____no_output_____ |
notebooks/05.03-OPTIONAL-Widget_Events_2_--_Separating_Concerns.ipynb | ###Markdown
*OPTIONAL* Separating the logic from the widgets. A key principle in designing a graphical user interface is to separate the logic of an application from the graphical widgets the user sees. For example, in the super-simple password generator widget, the basic logic is to construct a sequence of random letters given the length. Let's isolate that logic in a function, without any widgets. This function takes a password length and returns a generated password string.
###Code
def calculate_password(length):
import string
import secrets
    # Generate a string of random letters of the requested length.
password = ''.join(secrets.choice(string.ascii_letters) for _ in range(length))
return password
###Output
_____no_output_____
###Markdown
Test out the function a couple times in the cell below with different lengths. Note that unlike our first pass through this, you can test this function without defining any widgets. This means you can write tests for just the logic, use the function as part of a library, etc.
###Code
calculate_password(10)
###Output
_____no_output_____
###Markdown
The Graphical Controls. The code to build the graphical user interface widgets is the same as the previous iteration.
###Code
import ipywidgets as widgets

helpful_title = widgets.HTML('Generated password is:')
password_text = widgets.HTML('No password yet')
password_text.layout.margin = '0 0 0 20px'
password_length = widgets.IntSlider(description='Length of password',
min=8, max=20,
style={'description_width': 'initial'})
password_widget = widgets.VBox(children=[helpful_title, password_text, password_length])
password_widget
###Output
_____no_output_____
###Markdown
Connecting the logic to the widgets. When the slider `password_length` changes, we want to call `calculate_password` to come up with a new password, and set the value of the `password_text` widget to the return value of the function call. `update_password` takes the change from `password_length` as its argument and sets `password_text` with the result of `calculate_password`.
###Code
def update_password(change):
length = int(change.new)
new_password = calculate_password(length)
# NOTE THE LINE BELOW: it relies on the password widget already being defined.
password_text.value = new_password
password_length.observe(update_password, names='value')
###Output
_____no_output_____
###Markdown
Now that the connection is made, try moving the slider and you should see the password update.
###Code
password_widget
###Output
_____no_output_____
###Markdown
Benefits of separating concerns. Some advantages of this approach are: + Changes in `ipywidgets` only affect your controls setup. + Changes in functional logic only affect your password generation function. If you decide that a password with only letters isn't secure enough and decide to add some numbers and/or special characters, the only code you need to change is in the `calculate_password` function. + You can write unit tests for your `calculate_password` function -- which is where the important work is being done -- without doing in-browser testing of the graphical controls. Using interact: Note that using interact to build this GUI also emphasizes the separation between the logic and the controls. However, interact is also much more opinionated about how the controls are laid out: controls are in a vbox above the output of the function. Often this is great for a quick initial GUI, but it is restrictive for more complex GUIs.
###Code
from ipywidgets import interact
from IPython.display import display
interact(calculate_password, length=(8, 20));
###Output
_____no_output_____
###Markdown
We can make the interact a bit nicer by printing the result, rather than just returning the string. This time we use `interact` as a decorator.
###Code
@interact(length=(8, 20))
def print_password(length):
print(calculate_password(length))
###Output
_____no_output_____ |
code/notebooks/render_tree.ipynb | ###Markdown
Prepare tree rendering package. This notebook manipulates a tree and generates rendering packages. It can: - Collapse a tree at given taxonomic rank(s) based on several criteria. - Generate a color gradient for branch support values. - Generate files that can be directly parsed and rendered using [**iTOL**](https://itol.embl.de/) and [**FigTree**](http://tree.bio.ed.ac.uk/software/figtree/). Preparation: Dependencies
###Code
import re
import numpy as np
import pandas as pd
from skbio import TreeNode
###Output
_____no_output_____
###Markdown
Input files Tree file (with node IDs, and without support values)
###Code
tree_fp = '../trees/release/astral.cons.nwk'
###Output
_____no_output_____
###Markdown
Taxonomic information file (original or tax2tree-curated)
###Code
taxonomy_fp = '../taxonomy/tax2tree/ncbi/astral/filled_ranks.tsv'
###Output
_____no_output_____
###Markdown
Custom node attributes (bootstrap, estimated time range, metadata category, additional name, etc.)
###Code
custom_attrs_fps = {
'lpp': '../trees/release/supports/astral.txt'}
###Output
_____no_output_____
###Markdown
Parameters Collapse the tree from this rank up. For example, "class" will have the tree collapsed at class (if possible) or phylum. Leave empty or None if not needed.
###Code
# collapse_rank = None
collapse_rank = 'genus'
###Output
_____no_output_____
###Markdown
Determine the visual length of a collapsed clade (triangle or line). Options are: mean, std (don't use), min, 25%, 50% (median), 75% and max.
###Code
collapse_length_func = '50%'
###Output
_____no_output_____
###Markdown
Clades with fewer descendants than this threshold will not be collapsed. Either a fixed number, or a rank-to-number dictionary. Example: phylum = 1, class = 10. Leave 0 if not needed.
###Code
# min_clade_size = 0
# for full-scale (10k-taxon) trees
min_clade_size = {'kingdom': 1, 'phylum': 1, 'class': 5, 'order': 50, 'family': 50, 'genus': 50, 'species': 50}
# for 1k-taxon trees
# min_clade_size = {'kingdom': 1, 'phylum': 1, 'class': 1, 'order': 5, 'family': 10, 'genus': 10, 'species': 10}
# for class / phylum only:
# min_clade_size = {'kingdom': 1, 'phylum': 1, 'class': 10, 'order': 0, 'family': 0, 'genus': 0, 'species': 0}
###Output
_____no_output_____
###Markdown
Split clades whose number of descendants is less than this *fraction* of the dominant clade of the same taxon will not be collapsed. For example, if `Firmicutes_1` has 1000 tips and `Firmicutes_10` has 45 tips (< 1000 * 5%), the latter will not be collapsed.
###Code
# min_split_clade_frac = 0
min_split_clade_frac = 0.05
###Output
_____no_output_____
###Markdown
Whether to delete tips not belonging to any collapsed clades.
###Code
delete_uncollapsed = True
###Output
_____no_output_____
###Markdown
Whether to hide uncollapsed tip names. Effective when `delete_uncollapsed` is `False`.
###Code
hide_uncollapsed = True
###Output
_____no_output_____
###Markdown
Manipulate node labels using the following regular expressions (pairs of pattern and replacement).
###Code
label_format_regexes = [
(r'^Candidatus ', r'Ca. '),
(r'^candidate division ', r'Ca. ')
]
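
# Example (a sketch, not part of the original workflow): how these (pattern,
# replacement) pairs might be applied to a single node label later on:
#     label = 'Candidatus Saccharibacteria'
#     for pat, repl in label_format_regexes:
#         label = re.sub(pat, repl, label)
#     # label is now 'Ca. Saccharibacteria'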
###Output
_____no_output_____
###Markdown
Append rank code to taxon (e.g.,: `Bacteria` => `k__Bacteria`).
###Code
append_rank_code = True
###Output
_____no_output_____
###Markdown
Append clade size to taxon
###Code
append_clade_size = True
###Output
_____no_output_____
###Markdown
Low and high end of color gradient.
###Code
color_range = ('#f0f0f0', '#191919') # gray
# color_range = ('#deebf7', '#3182bd') # blue
###Output
_____no_output_____
###Markdown
Helpers Basic utilities
###Code
def sort_node_ids(d):
"""Sort names of tips and internal nodes."""
return sorted(d, key=lambda x: (x[0], int(x[1:])))
def digits(num):
"""Get number digits after decimal point."""
if not num.replace('.', '').isdigit() or num.count('.') != 1:
raise ValueError('Not a valid float number: %s' % num)
return len(num.split('.')[1])
def de_suffix(taxon, names):
"""Restore suffixed taxon name."""
if '_' not in taxon:
return taxon
res = '_'.join(taxon.split('_')[:-1])
return res if res in names else taxon
###Output
_____no_output_____
###Markdown
Node dimension calculation
###Code
def get_clade_dimensions(node):
"""Calculate the dimensions of a clade.
Parameters
----------
node : skbio.TreeNode
clade to calculate
Returns
-------
pd.Series
count, mean, std, min, 25%, 50%, 75%, max
"""
lengths = pd.Series(x.accumulate_to_ancestor(node) for x in node.tips())
return lengths.describe()
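
# Note (added for clarity): the `collapse_length_func` parameter defined above
# simply indexes into this summary; e.g. get_clade_dimensions(node)['50%']
# would give the median tip depth used as the visual length of a collapsed clade.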
###Output
_____no_output_____
###Markdown
Selective tree shearing and pruning
###Code
def selective_prune(tree, tips_to_keep, nodes_to_keep=[]):
"""Shear a tree and selectively prune it.
Parameters
----------
tree : skbio.TreeNode
tree to shear
tips_to_keep : iterable of str
tip names to keep
nodes_to_keep : iterable of str
internal node names to keep
Returns
-------
tree : skbio.TreeNode
resulting tree
Notes
-----
Inherited from scikit-bio's `shear` and `prune` functions, but will
selectively remove internal nodes.
"""
tcopy = tree.deepcopy()
ids = set(tips_to_keep)
marked = set()
for tip in tcopy.tips():
if tip.name in ids:
marked.add(tip)
for anc in tip.ancestors():
if anc in marked:
break
else:
marked.add(anc)
for node in list(tcopy.traverse()):
if node not in marked:
node.parent.remove(node)
ids = set(nodes_to_keep)
nodes_to_remove = []
for node in tcopy.traverse(include_self=False):
if len(node.children) == 1:
if node.name not in ids:
nodes_to_remove.append(node)
for node in nodes_to_remove:
child = node.children[0]
if child.length is None or node.length is None:
child.length = child.length or node.length
else:
child.length += node.length
if node.parent is None:
continue
node.parent.append(child)
node.parent.remove(node)
return tcopy
###Output
_____no_output_____
###Markdown
Newick string formatting
###Code
def format_newick(tree, operators=',:_;()[] ', digits=None):
"""Generate a Newick string from a tree.
Parameters
----------
tree : skbio.TreeNode
tree to convert to a Newick string
operators : str
list of characters that have special meaning in a tree file so that
a node name containing any of them must be quoted
digits : int or tuple of (int, int)
number of digits (float and scientific) to print in a branch length
Returns
-------
str
formatted Newick string
Notes
-----
Modified from scikit-bio's `_tree_node_to_newick`. In addition to the
prototype, it can do:
1. Keep spaces without converting them to underscores.
2. Print branch lengths based on given precision.
"""
res = ''
operators = set(operators or '')
if isinstance(digits, int):
digits = (digits, digits)
current_depth = 0
nodes_left = [(tree, 0)]
while len(nodes_left) > 0:
entry = nodes_left.pop()
node, node_depth = entry
if node.children and node_depth >= current_depth:
res += '('
nodes_left.append(entry)
nodes_left += ((child, node_depth + 1) for child in
reversed(node.children))
current_depth = node_depth + 1
else:
if node_depth < current_depth:
res += ')'
current_depth -= 1
if node.name:
escaped = "%s" % node.name.replace("'", "''")
if any(t in operators for t in node.name):
res += "'"
res += escaped
res += "'"
else:
res += escaped
if node.length is not None:
res += ':'
length = str(node.length)
if digits:
length = '%.*g' % ((digits[0] if 'e' in length
else digits[1]), node.length)
res += length
if nodes_left and nodes_left[-1][1] == current_depth:
res += ','
return res + ';'
###Output
_____no_output_____
###Markdown
Color gradient generation
###Code
def hex2rgb(h):
return tuple(int(h.lstrip('#')[i: i + 2], 16) for i in (0, 2 ,4))
def rgb2hex(r):
return '#{:02x}{:02x}{:02x}'.format(r[0], r[1], r[2])
def make_color_palette(start, end, n=101):
"""Generate a gradient of 101 colors.
Parameters
----------
start : str
start color in hex format
end : str
end color in hex format
n : int
number of colors to return
Returns
-------
list of str
colors in hex format
"""
start_, end_ = hex2rgb(start), hex2rgb(end)
seqs = [np.linspace(start_[i], end_[i], n).astype(int) for i in range(3)]
rgbs = [[seqs[x][i] for x in range(3)] for i in range(n)]
return [rgb2hex(x) for x in rgbs]
def make_color_gradient(node2val, colors):
"""Deal with polytomic taxa.
Parameters
----------
node2val : dict of float or int
node ID to value map
colors : list of str
101 colors for values of 0 to 100
Returns
-------
dict of str
node ID to color map
"""
for id_, val in node2val.items():
if val is None or np.isnan(val) or val == '':
node2val[id_] = 0
elif not isinstance(val, int) and not isinstance(val, float):
raise ValueError('Invalid number %s.' % val)
# shrink larger integers to 0-100 range
max_val = max(node2val.values())
if max_val > 100:
for id_ in node2val:
node2val[id_] /= (max_val / 100)
# convert fraction into percentage, and percentage to integer
convert = True if max_val <= 1 else False
for id_ in node2val:
try:
node2val[id_] = (
int(node2val[id_] * 100) if convert else int(node2val[id_]))
except ValueError:
print('%s' % id_)
# map support to color
return {k: colors[v] for k, v in node2val.items()}
###Output
_____no_output_____
###Markdown
iTOL file generation
###Code
def write_itol_label(f, id2label):
"""Generate iTOL node label file."""
f.write('LABELS\n')
f.write('SEPARATOR TAB\n')
f.write('DATA\n')
for id_ in sort_node_ids(id2label):
f.write('%s\t%s\n' % (id_, id2label[id_]))
def write_itol_collapse(f, nodes_to_collapse):
"""Generate an iTOL collapse file.
Parameters
----------
nodes_to_collapse : iterable of str
node IDs to collapse
f : file handle
file to write collapse information
"""
f.write('COLLAPSE\n')
f.write('DATA\n')
for id_ in nodes_to_collapse:
f.write('%s\n' % id_)
def write_itol_tree_colors(f, id2color, target='branch',
label_or_style='normal', size='1'):
"""Generate an iTOL tree colors file.
Parameters
----------
    f : file handle
        file to write color information
    id2color : dict of str
        node ID to color map
    target, label_or_style, size : str or dict of str
        iTOL flavors, either a fixed value or a node ID to value map
target == "type" in iTOL jargon
"""
f.write('TREE_COLORS\n')
f.write('SEPARATOR TAB\n')
f.write('DATA\n')
# format: ID, target, color, label_or_style, size_factor
for id_ in sort_node_ids(id2color):
f.write('%s\t%s\t%s\t%s\t%s\n' % (
id_, target[id_] if isinstance(target, dict) else target,
id2color[id_], label_or_style[id_] if isinstance(
label_or_style, dict) else label_or_style,
size[id_] if isinstance(size, dict) else size))
def write_itol_dataset_text(f, title, id2text, position='0.5',
color='#000000', style='normal',
size='1', rotation='0'):
"""Generate an iTOL text dataset file.
Parameters
----------
    f : file handle
        file to write node texts
    id2text : dict of str
        node ID to text map
    title : str
        title of this dataset
    position, color, style, size, rotation : str or dict of str
        iTOL flavors, either a fixed value or a node ID to value map
"""
f.write('DATASET_TEXT\n')
f.write('SEPARATOR TAB\n')
f.write('DATASET_LABEL\t%s\n' % title)
f.write('SHOW_INTERNAL\t1\n')
f.write('DATA\n')
# format: ID, label, position, color, style, size_factor, rotation
for id_ in sort_node_ids(id2text):
text = id2text[id_]
if isinstance(text, float):
text = '%.3g' % text
f.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (
id_,
text,
position[id_] if isinstance(position, dict) else position,
color[id_] if isinstance(color, dict) else color,
style[id_] if isinstance(style, dict) else style,
size[id_] if isinstance(size, dict) else size,
rotation[id_] if isinstance(rotation, dict) else rotation))
def write_itol_dataset_style(f, title, ids, target='branch', what='node',
color='#000000', factor='normal', style='1',
bgcolor=None):
"""Generate an iTOL style dataset file.
Parameters
----------
f : file handle
file to write node texts
title : str
title of this dataset
ids : iterable of str
node ID list
    target, what, color, factor, style, bgcolor : str or dict of str
        iTOL flavors, either a fixed value or a node ID to value map
"""
f.write('DATASET_STYLE\n')
f.write('SEPARATOR TAB\n')
f.write('DATASET_LABEL\t%s\n' % title)
f.write('COLOR\t#000000\n')
f.write('DATA\n')
# format: ID, target, what, color, factor, style, bgcolor
for id_ in sort_node_ids(ids):
f.write('%s\t%s\t%s\t%s\t%s\t%s' % (
id_,
target[id_] if isinstance(target, dict) else target,
what[id_] if isinstance(what, dict) else what,
color[id_] if isinstance(color, dict) else color,
factor[id_] if isinstance(factor, dict) else factor,
style[id_] if isinstance(style, dict) else style))
if bgcolor is not None:
            f.write('\t%s' % (bgcolor[id_] if isinstance(bgcolor, dict) else bgcolor))
f.write('\n')
###Output
_____no_output_____
###Markdown
FigTree file generation In a FigTree-compatible Nexus tree file, nodes (tips and internal nodes) and taxa may contain attributes in the following format:```(taxon1,taxon2)[&!name="Escherichia coli",support=90,range={80,95},!color=ff0000]:1.234,...``` Here "!name", "support", "range" and "!color" are the attributes.
###Code
def make_figtree_attr_str(name, attr_db, attrs=None):
"""Generate a FigTree-compatible attribute string.
Parameters
----------
name : str
name of node or taxon to annotate
attr_db : dict of dict of str
map of names to attributes
Returns
-------
str
formatted attribute string
Notes
-----
For example, attr_db = {
'!name': {'N1': '"spA"', 'N2': '"spB"'...},
'support': {'N1': '90', 'N2': '75'...}
}
For node "N2", the result will be `[&!name="spB",support=75]`.
All values should be str. Strings should be double-quoted. Tuples should be
written like `{"spA",0.95,#000000}`. Special FigTree-aware attributes such
as "name", "color" and "collapse" should have a prefix `!`.
"""
    # avoid a shared mutable default: start from a fresh dict when none is given
    if attrs is None:
        attrs = {}
    for attr in attr_db:
if name in attr_db[attr]:
val = attr_db[attr][name]
if val: # omit null or empty string
attrs[attr] = val
attr_strs = []
for attr, val in sorted(attrs.items(), key=lambda x: x[0]):
attr_strs.append('%s=%s' % (attr, val))
return '[&%s]' % ','.join(attr_strs) if len(attr_strs) > 0 else ''
def add_figtree_node_attrs(tree, node2attrs):
"""Add FigTree-compatible attributes to nodes of a tree.
Parameters
----------
tree : skbio.TreeNode
tree to add node attributes to
node2attrs : dict of dict
map of node names to attributes
"""
for node in tree.traverse(include_self=True):
if not node.name:
continue
attrs = {} if node.is_tip() else {'id': '"%s"' % node.name}
attr_str = make_figtree_attr_str(node.name, node2attrs, attrs)
node.name = ('%s%s' % (node.name, attr_str) if node.is_tip()
else attr_str)
def add_figtree_taxon_attrs(tree, taxon2attrs):
"""Add FigTree-compatible attributes to the taxon labels.
Parameters
----------
tree : skbio.TreeNode
tree to add node attributes to
taxon2attrs : dict of dict
map of taxa to attributes
Returns
-------
list of str
taxon labels with attributes appended
"""
res = []
for taxon in sorted(tree.subset()):
attr_str = make_figtree_attr_str(taxon, taxon2attrs)
res.append('%s%s' % (taxon, attr_str))
return res
def write_figtree_nexus(tree, f, title='tree1', taxlabels=None):
"""Generate a FigTree-compatible Nexus tree file.
Parameters
----------
tree : skbio.TreeNode
tree to add node attributes to
f : file handle
file to write nexus tree
title : str
title of the tree
taxlabels : list of str
custom taxon labels to write
"""
f.write('#NEXUS\n')
f.write('begin taxa;\n')
f.write('\tdimensions ntax=%d;\n' % tree.count(tips=True))
f.write('\ttaxlabels\n')
if taxlabels is None:
taxlabels = sorted(tree.subset())
for taxon in taxlabels:
f.write('\t%s\n' % taxon)
f.write(';\n')
f.write('end;\n')
f.write('\n')
f.write('begin trees;\n')
f.write('\ttree %s = [&%s] ' % (
title, 'R' if len(tree.children) == 2 else 'U'))
f.write(format_newick(tree, operators=None))
f.write('\n')
f.write('end;\n')
###Output
_____no_output_____
###Markdown
Pre-processing Read and process tree Read tree.
###Code
tree = TreeNode.read(tree_fp)
n, m = tree.count(), tree.count(tips=True)
print('Tree has %d tips and %d internal nodes.' % (m, n - m))
tips = tree.subset()
###Output
_____no_output_____
###Markdown
Convert null branch lengths to zero.
###Code
for node in tree.traverse(include_self=False):
node.length = node.length or 0.0
###Output
_____no_output_____
###Markdown
Get the precision (maximum number of float or scientific notation digits) of branch lengths. This will be useful for correctly formatting branch lengths after collapsing the tree.
###Code
max_f, max_e = 0, 0
for node in tree.traverse():
if node.length is not None:
x = str(float(node.length))
if 'e' in x:
max_e = max(max_e, digits(str(float(x.split('e')[0]))))
else:
max_f = max(max_f, digits(x))
max_f, max_e
###Output
_____no_output_____
###Markdown
Calculate number of descendants of each node.
###Code
node2n = {}
for node in tree.postorder(include_self=True):
if node.is_tip():
node2n[node.name] = 1
else:
node2n[node.name] = sum([node2n[x.name] for x in node.children])
###Output
_____no_output_____
###Markdown
Read and process taxonomy
###Code
dfr = pd.read_csv(taxonomy_fp, sep='\t', index_col=0)
dfr = dfr[dfr.index.isin(tips)]
dfr.index.name = 'node'
dfr.dropna().head(5)
ranks = dfr.columns.tolist()
ranks
###Output
_____no_output_____
###Markdown
Tree annotation Generate node labels The lowest common ancestor (LCA) of the genomes represented by each taxon will receive this taxon as the node label. One node may receive multiple taxa if they all meet this criterion.If this operation is applied to the tax2tree consensus strings (`consensus_ranks.tsv`), the outcome should match the labels decorated onto the tree by tax2tree (`decorations_by_rank.tsv`).In the current analysis, the input file should be the tax2tree consensus strings filled with taxa representing single genomes (`filled_ranks.tsv`). Therefore the outcome will contain more information. Both tips and internal nodes will be included.
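As a toy illustration of the LCA rule (the four-tip tree and taxon below are made up; the real labels come from the table read above):
```
from skbio import TreeNode
toy = TreeNode.read(['((G1,G2)N2,(G3,G4)N3)N1;'])
# if genomes G1 and G2 are the only members of a taxon "Foo",
# "Foo" is assigned to their lowest common ancestor, N2
print(toy.lca(['G1', 'G2']).name)  # N2
```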
###Code
labels = {}
for rank in ranks:
for taxon in dfr[rank].value_counts().index:
indices = dfr[dfr[rank] == taxon].index.tolist()
node = (indices[0] if len(indices) == 1
else tree.lca(list(tips.intersection(indices))).name)
labels.setdefault(node, {})[rank] = taxon
dfl = pd.DataFrame.from_dict(labels, orient='index')
dfl.index.name = 'node'
dfl = dfl[ranks]
dfl = dfl.loc[sorted(dfl.index, key=lambda x: (x[0], int(x[1:])))]
dfl.head(3)
###Output
_____no_output_____
###Markdown
Get the highest-rank name when multiple ranks have names in a node label.
###Code
def get_highest_taxon(row):
"""Get the highest taxon in a row."""
for rank in row.index:
if pd.notnull(row[rank]):
return rank, row[rank]
return np.nan, np.nan
dfl['hrank'], dfl['htaxon'] = zip(*dfl.apply(get_highest_taxon, axis=1))
dfl[['hrank', 'htaxon']].dropna().head(5)
###Output
_____no_output_____
###Markdown
Collapse clades at or above a rank Identify the ranks to collapse.
###Code
collapse_ranks = []
for rank in ranks:
collapse_ranks.append(rank)
if collapse_rank and rank == collapse_rank:
break
print('Collapse at the following ranks: %s.' % ', '.join(collapse_ranks))
###Output
Collapse at the following ranks: kingdom, phylum, class, order, family, genus.
###Markdown
Generate a list of candidate nodes.
###Code
df_can = dfl[dfl['hrank'].isin(collapse_ranks)][['hrank', 'htaxon']]
df_can['size'] = df_can.index.to_series().map(node2n)
df_can.head()
###Output
_____no_output_____
###Markdown
Exclude nodes with number of descendants below threshold.
###Code
if min_clade_size:
to_keep = []
for row in df_can.itertuples():
th = min_clade_size[row.hrank] if isinstance(min_clade_size, dict) else min_clade_size
if row.size >= th:
to_keep.append(row.Index)
df_can = df_can[df_can.index.isin(to_keep)]
df_can.shape[0]
df_ = df_can[df_can['htaxon'].str.contains('_\d+$', regex=True)].copy()
###Output
_____no_output_____
###Markdown
Exclude split clades that have fewer tips than the given fraction of the dominant clade of the same taxon.
###Code
if min_split_clade_frac > 0:
df_ = df_can[df_can['htaxon'].str.contains('_\d+$', regex=True)].copy()
df_['taxon'], df_['idx'] = zip(*df_['htaxon'].apply(lambda x: x[::-1]).str.split(
'_', n=1).apply(lambda x: (x[1][::-1], x[0][::-1])))
top_clade_sizes = dict(df_.query('idx == "1"')[['taxon', 'size']].values.tolist())
df_ = df_[df_['size'] >= df_['taxon'].map(top_clade_sizes) * min_split_clade_frac]
df_can = df_can[df_can.index.isin(df_.index) | ~df_can['htaxon'].str.contains('_\d+$', regex=True)]
df_can.shape[0]
###Output
_____no_output_____
###Markdown
Get the dimensions of clades represented by internal nodes.
###Code
tips = tree.subset()
dimensions = {x: get_clade_dimensions(tree.find(x)) for x in df_can.index if x not in tips}
df_dim = pd.DataFrame.from_dict(dimensions, orient='index')
df_dim = df_dim.loc[sorted(df_dim.index, key=lambda x: int(x[1:]))]
df_dim.head()
###Output
_____no_output_____
###Markdown
Determine which clades (as represented by nodes) should be collapsed. The rationale is: Start from the lowest rank, move up the hierarchy. If a node is already marked as "collapsed", all its ancestral nodes will be prohibited from being selected.
###Code
nodes_to_collapse = []
nodes_to_skip = set()
for rank in collapse_ranks[::-1]:
for node in df_can[df_can['hrank'] == rank].index:
if node not in nodes_to_skip:
nodes_to_collapse.append(node)
for anc in tree.find(node).ancestors():
nodes_to_skip.add(anc.name)
print('Nodes to collapse: %d.' % len(nodes_to_collapse))
###Output
Nodes to collapse: 207.
###Markdown
Calculate how many tips (genomes) are covered by the collapsed clades.
###Code
tips_covered = set()
for name in nodes_to_collapse:
node = tree.find(name)
tips = set([name]) if node.is_tip() else node.subset()
if len(tips.intersection(tips_covered)) > 0:
raise ValueError('Overlapping clades detected.')
tips_covered.update(tips)
tips_missed = tree.subset() - tips_covered
print('Tips covered: %d. Tips missed: %d.'
% (len(tips_covered), len(tips_missed)))
###Output
Tips covered: 6944. Tips missed: 3631.
###Markdown
Tree visualization Tree and labels manipulation Tree pruning Original tree dimensions.
###Code
tree_tips = tree.subset()
tree_nodes = set(x.name for x in tree.non_tips(include_self=True))
print('Original tree has %d tips and %d internal nodes.'
% (len(tree_tips), len(tree_nodes)))
###Output
Original tree has 10575 tips and 10574 internal nodes.
###Markdown
Prune tree to include collapsed clades only.
###Code
nodes_w_labels = [x for x in dfl['htaxon'].dropna().index if x in tree_nodes]
tree1 = selective_prune(tree, tips_covered, nodes_w_labels) if collapse_rank and delete_uncollapsed else tree.copy()
tree1_tips = tree1.subset()
tree1_nodes = set(x.name for x in tree1.non_tips(include_self=True))
print('Output tree has %d tips and %d internal nodes.'
% (len(tree1_tips), len(tree1_nodes)))
###Output
Output tree has 6944 tips and 6996 internal nodes.
###Markdown
Export pruned tree.
###Code
with open('pruned_tree.nwk', 'w') as f:
f.write('%s\n' % format_newick(tree1, operators=None))
###Output
_____no_output_____
###Markdown
Tree shrinking Generate a tree in which the collapsed clades are actually deleted.
###Code
tree3 = tree1.copy()
nodes_to_remove = []
for node in tree3.non_tips():
if node.name in nodes_to_collapse:
node.length += df_dim[collapse_length_func][node.name]
nodes_to_remove.extend(node.children)
tree3.remove_deleted(lambda x: x in nodes_to_remove)
tree3.prune()
print('Collapsed tree has %d tips.' % tree3.count(tips=True))
with open('collapsed_tree.nwk', 'w') as f:
f.write(format_newick(tree3, operators=None))
###Output
Collapsed tree has 207 tips.
###Markdown
Node label formatting Format node label strings.
###Code
name_map = dfl['htaxon'].to_dict()
if len(label_format_regexes) > 0:
for id_ in name_map:
for pattern, repl in label_format_regexes:
name_map[id_] = re.sub(pattern, repl, name_map[id_])
if append_rank_code is True:
for id_ in name_map:
name_map[id_] = '%s__%s' % (dfl['hrank'][id_][0], name_map[id_])
if append_clade_size is True:
for id_ in name_map:
n = node2n[id_]
if n > 1:
name_map[id_] = '%s (%d)' % (name_map[id_], node2n[id_])
sorted(name_map.items())[:5]
tip_name_map, node_name_map = {}, {}
for id_, name in name_map.items():
if id_ in tree_tips:
tip_name_map[id_] = name
elif id_ in tree_nodes:
node_name_map[id_] = name
###Output
_____no_output_____
###Markdown
Additional attributes
###Code
if custom_attrs_fps:
dfa = {}
for name, fp in custom_attrs_fps.items():
dfa[name] = pd.read_table(fp, index_col=0, names=[name])
###Output
_____no_output_____
###Markdown
FigTree file generation Generate FigTree tip and node name maps.
###Code
figtree_tip_name_map = {k: '"%s"' % v for k, v in tip_name_map.items()}
figtree_node_name_map = {k: '"%s"' % v for k, v in node_name_map.items()}
if collapse_rank and hide_uncollapsed:
for name in figtree_tip_name_map:
if name in tips_missed:
figtree_tip_name_map[name] = '""'
###Output
_____no_output_____
###Markdown
Let FigTree display internal node labels without displaying labels of tips (including collapsed clades).
###Code
no_labels = set(nodes_to_collapse).union(tree1_tips)
figtree_label_map = {k: v for k, v in figtree_node_name_map.items() if k not in no_labels}
tip2attrs = {'!name': figtree_tip_name_map}
node2attrs = {'!name': figtree_node_name_map, 'label': figtree_label_map}
###Output
_____no_output_____
###Markdown
Generate a FigTree collapse map.
###Code
figtree_collapse_map = {}
tree_radius = max(x.accumulate_to_ancestor(tree1) for x in tree1.tips())
for name in nodes_to_collapse:
if name not in tree1_tips:
length = df_dim[collapse_length_func][name]
height = tree_radius - tree1.find(name).accumulate_to_ancestor(tree1) - length
figtree_collapse_map[name] = '{"collapsed",%.*g}' % (max_f, height)
sorted(figtree_collapse_map.items())[:5]
node2attrs['!collapse'] = figtree_collapse_map
###Output
_____no_output_____
###Markdown
Generate FigTree size map.
###Code
node2attrs['size'] = node2n
###Output
_____no_output_____
###Markdown
Generate additional attributes for FigTree.
###Code
if custom_attrs_fps:
for name, df_ in dfa.items():
if np.issubdtype(df_[name], np.number):
map_ = {k: str(v) for k, v in df_[name].iteritems()}
else:
map_ = {k: '"%s"' % v for k, v in df_[name].iteritems()}
tip2attrs[name] = node2attrs[name] = map_
###Output
_____no_output_____
###Markdown
Write FigTree files.
###Code
tree2 = tree1.copy()
taxlabels = add_figtree_taxon_attrs(tree2, tip2attrs)
add_figtree_node_attrs(tree2, node2attrs)
with open('figtree.tre', 'w') as f:
write_figtree_nexus(tree2, f, taxlabels=taxlabels)
print('Task completed.')
###Output
Task completed.
###Markdown
iTOL files generation Step 1: Upload the already exported pruned tree file (Newick format) to iTOL. Write iTOL node label file. (Applies to both tips and internal nodes, including collapsed triangles.)
###Code
with open('label.txt', 'w') as f:
write_itol_label(f, {**node_name_map, **tip_name_map})
###Output
_____no_output_____
###Markdown
Write iTOL branch text file
###Code
branch_name_map = {k: v for k, v in node_name_map.items() if k not in nodes_to_collapse}
with open('branch_text.txt', 'w') as f:
# position = 0.5: at the middle of branch
write_itol_dataset_text(f, 'branch text', branch_name_map, position='0.5', size='1')
###Output
_____no_output_____
###Markdown
Write iTOL collapse file.
###Code
if collapse_rank:
with open('collapse.txt', 'w') as f:
write_itol_collapse(f, sorted(
x for x in nodes_to_collapse if x in tree1_nodes))
###Output
_____no_output_____
###Markdown
Write iTOL files for extra node attributes.
###Code
color_gradient = make_color_palette(color_range[0], color_range[1])
if custom_attrs_fps:
for name, df_ in dfa.items():
# node text
with open('%s_node_text.txt' % name, 'w') as f:
write_itol_dataset_text(
f, '%s node text' % name, df_[name].to_dict(), position='1', size='1')
# branch color gradient
if np.issubdtype(df_[name], np.number):
branch_color_map = make_color_gradient(df_[name].to_dict(), color_gradient)
with open('%s_branch_color.txt' % name, 'w') as f:
write_itol_dataset_style(
f, '%s color gradient' % name, branch_color_map, target='branch',
what='node', color=branch_color_map)
print('Task completed!')
###Output
Task completed!
|
Tedtalks data project_ss1078.ipynb | ###Markdown
2553 rows
###Code
# keep tags that appear in at least 180 talks and turn them into indicator columns
count_vector = CountVectorizer(stop_words='english',min_df=180/len(data))
tag_array = count_vector.fit_transform(data.tags).toarray()
tag_matrix = pd.DataFrame(tag_array, columns = count_vector.get_feature_names())
tag_matrix = tag_matrix.add_prefix('tags_')
# append the columns obtained to the base data
data = pd.concat([data,tag_matrix], axis=1)
data=data.drop(['tags'], axis = 1) # drop tags column
#list(data)
data.head()
# all date operations
data['film_date'] = data['film_date'].apply(lambda x: datetime.date.fromtimestamp(int(x)))
data['published_date'] = data['published_date'].apply(lambda x: datetime.date.fromtimestamp(int(x)))
data['film_month'] = data['film_date'].apply(lambda x: x.month)
data['pub_month'] = data['published_date'].apply(lambda x: x.month)
data['film_weekday'] = data['film_date'].apply(lambda x: x.weekday()) # Monday: 0, Sunday: 6
data['pub_weekday'] = data['published_date'].apply(lambda x: x.weekday())
data[['film_date','published_date']].head()
# pairplots between numerical variables to check for evident patterns and correlations
nums = ['comments', 'duration', 'num_speaker', 'views']
sns.pairplot(data, vars=nums, size=3);
sns.jointplot(x=data['languages'], y=data['views'], kind='reg').annotate(stats.pearsonr)
###Output
_____no_output_____
###Markdown
There are a few talks with unusually high view counts relative to the number of languages they are available in. These may be outliers whose high views are driven by other factors, which we will have to investigate.
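One quick way to eyeball these outliers (a sketch using the columns already in `data`):
```
# the most-viewed talks and how many languages they are available in
data.sort_values('views', ascending=False)[['main_speaker', 'languages', 'views']].head(10)
```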
###Code
sns.jointplot(x=data['views'], y=data['comments'], kind='reg').annotate(stats.pearsonr)
###Output
_____no_output_____
###Markdown
There seems to be a very high correlation between comments and views, which is intuitive.
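A quick numeric confirmation (sketch):
```
# Pearson correlation between views and comments
data[['views', 'comments']].corr(method='pearson')
```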
###Code
# check relation between duration, comments and views
data_sorted=data.sort_values(by='views',ascending=True)
df2=data_sorted.iloc[:20,:]
df2.index=range(0,len(df2))
#visualization
data_viz = [
{
'y': df2.views,
'x': df2.index,
'mode': 'markers',
'marker': {
'color': df2.duration,
'size': df2.comments,
'showscale': True
},
"text" : df2.main_speaker
}
]
iplot(data_viz)
###Output
_____no_output_____
###Markdown
Clearly, a few of the lowest-viewed videos are relatively long. Duration may therefore be an important factor for the model.
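A rough way to check this (a sketch; `duration` is still in seconds at this point):
```
# the longest talks and their view counts
data.sort_values('duration', ascending=False)[['main_speaker', 'duration', 'views']].head(10)
```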
###Code
data['event'].unique()
data['event_category'] = data.event.apply(lambda x: "TEDx" if "TEDx" in x else ("TED" if "TED" in x else "Other"))
data['event_category'].value_counts()
data['duration']= data['duration']/60 # per minute
data['transcript'] = data['transcript'].fillna('')
data['wc_per_min'] = data['transcript'].apply(lambda x: len(x.split()))/data['duration']
data.head()
data.shape
nlp = spacy.load('en')
feats = ['char_count', 'word_count', 'word_count_cln',
'stopword_count', '_NOUN', '_VERB', '_ADP', '_ADJ', '_DET', '_PROPN',
'_INTJ', '_PUNCT', '_NUM', '_PRON', '_ADV', '_PART', '_amod', '_advmod', '_acl', '_relcl', '_advcl',
'_neg','_PERSON','_NORP','_FAC','_ORG','_GPE','_LOC','_PRODUCT','_EVENT','_WORK_OF_ART','_LANGUAGE']
class text_features:
def __init__(self, df, textcol):
self.df = df
self.textcol = textcol
self.c = "spacy_" + textcol
self.df[self.c] = self.df[self.textcol].apply( lambda x : nlp(x))
self.pos_tags = ['NOUN', 'VERB', 'ADP', 'ADJ', 'DET', 'PROPN', 'INTJ', 'PUNCT',\
'NUM', 'PRON', 'ADV', 'PART']
self.dep_tags = ['amod', 'advmod', 'acl', 'relcl', 'advcl','neg']
self.ner_tags = ['PERSON','NORP','FAC','ORG','GPE','LOC','PRODUCT','EVENT','WORK_OF_ART','LANGUAGE']
def _spacy_cleaning(self, doc):
tokens = [token for token in doc if (token.is_stop == False)\
and (token.is_punct == False)]
words = [token.lemma_ for token in tokens]
return " ".join(words)
def _spacy_features(self):
self.df["clean_text"] = self.df[self.c].apply(lambda x : self._spacy_cleaning(x))
self.df["char_count"] = self.df[self.textcol].apply(len)
self.df["word_count"] = self.df[self.c].apply(lambda x : len([_ for _ in x]))
self.df["word_count_cln"] = self.df["clean_text"].apply(lambda x : len(x.split()))
self.df["stopword_count"] = self.df[self.c].apply(lambda x :
len([_ for _ in x if _.is_stop]))
self.df["pos_tags"] = self.df[self.c].apply(lambda x :
dict(Counter([_.head.pos_ for _ in x])))
self.df["dep_tags"] = self.df[self.c].apply(lambda x :
dict(Counter([_.dep_ for _ in x])))
self.df["ner_tags"] = self.df[self.c].apply(lambda x :
dict(Counter([_.ent_type_ for _ in x])))
def _flatten_features(self):
for key in self.pos_tags:
self.df["_" + key] = self.df["pos_tags"].apply(lambda x : \
x[key] if key in x else 0)
for key in self.dep_tags:
self.df["_" + key] = self.df["dep_tags"].apply(lambda x : \
x[key] if key in x else 0)
for key in self.ner_tags:
self.df["_" + key] = self.df["ner_tags"].apply(lambda x : \
x[key] if key in x else 0)
def generate_features(self):
self._spacy_features()
self._flatten_features()
self.df = self.df.drop([self.c, "pos_tags", "dep_tags", 'ner_tags',"clean_text"], axis=1)
return self.df
def spacy_features(df, tc):
fe = text_features(df, tc)
return fe.generate_features()
textcol = "transcript"
transcript_features = spacy_features(data, textcol)
transcript_features[[textcol] + feats].head()
# escape the parentheses so we count the literal "(Laughter)" / "(Applause)" annotations
data['transcript'].str.count(r"\(Laughter\)")
data['transcript'].str.count(r"\(Applause\)")
data['laughter_count'] = data['transcript'].str.count(r"\(Laughter\)")
data['applaud_count'] = data['transcript'].str.count(r"\(Applause\)")
data.head()
###Output
_____no_output_____ |
examples/notebooks/Distribute_Generic_Functions.ipynb | ###Markdown
Distribute functions across a BigQuery dataset using Spark Problem: As a PM, I give lots of public presentations and I want to make sure I use images that have an open license BigQuery Public Datasets - Open Images: 9 million URLs of open images (with labels across 6,000 categories) For smaller datasets, can use BigQuery magic and python
###Code
%reload_ext google.cloud.bigquery
%%bigquery pd_results --use_bqstorage_api
SELECT original_url, title
FROM `bigquery-public-data.open_images.images`
WHERE license = 'https://creativecommons.org/licenses/by/2.0/'
LIMIT 10
#review what our image database contains.
import pandas as pd
pd.set_option('display.max_colwidth', None)
pd_results.head()
###Output
_____no_output_____
###Markdown
Looks like a great set of images but how do I find what I need? What's a DSC-4918?
###Code
#function that makes is super easy to abstract some high confidence labels about my image.
from google.cloud import vision
def AnnotateHighConfidenceLabelsFromImage(image_uri):
client = vision.ImageAnnotatorClient()
request = {
'image': {
'source': {'image_uri': image_uri},
},
}
response = client.annotate_image(request)
high_confidence_labels = []
for la in response.label_annotations:
if float(la.score * 100) > 90.0:
high_confidence_labels.append(la.description)
if len(high_confidence_labels) < 1:
high_confidence_labels.append("No labels detected")
return str(high_confidence_labels)
#for 10 images, no problem to simply loop through them to get the labels.
for image in pd_results['original_url']:
labels = AnnotateHighConfidenceLabelsFromImage(image)
print(labels)
###Output
['No labels detected']
['No labels detected']
['Branch', 'Organism', 'Plant community', 'Monochrome photography', 'Monochrome', 'Adaptation']
['Arm', 'Finger', 'People', 'Comfort', 'Hand', 'Child']
['Electronic device', 'Furniture', 'Technology', 'Table', 'Laptop', 'Computer accessory', 'Computer']
['Branch', 'Twig', 'Adaptation', 'Woody plant']
['Organism', 'Bird']
['Atmosphere', 'Cloud', 'Atmospheric phenomenon']
['Dog breed', 'Dog', 'Carnivore', 'Mammal']
['Text', 'White', 'Line', 'Font', 'Colorfulness']
###Markdown
Expanding to the full corpus of images will require scaling with Spark
###Code
#but what happens when I need to run that label extractor against the full dataset of images.
no_limit_query = "SELECT original_url, title FROM `bigquery-public-data.open_images.images` WHERE license = 'https://creativecommons.org/licenses/by/2.0/' LIMIT 100"
# use Spark to load full dataset into Spark Dataframe. Setup Spark Session with BQ storage connector
from pyspark.sql import SparkSession
spark = SparkSession.builder.config("spark.jars.packages", "com.google.cloud.spark:spark-bigquery-with-dependencies_2." + str(12) + ":0.18.0") \
.enableHiveSupport() \
.getOrCreate()
#Use this function to push the processing of the query back to BQ but still use BQ Storage Connector to
#pull back data in parallel and directly into a Spark DF that can handle the size.
from google.cloud import bigquery
from pyspark import StorageLevel
def bq2df(QUERY):
bq = bigquery.Client()
query_job = bq.query(QUERY)
query_job.result()
df = spark.read.format('bigquery') \
.option('dataset', query_job.destination.dataset_id) \
.load(query_job.destination.table_id) \
.persist(StorageLevel.MEMORY_AND_DISK)
return df
df = bq2df(no_limit_query)
print(df.count())
df.printSchema()
#I'm now going to Spark-ify my python function with no code changes.
from pyspark.sql.functions import udf
@udf("string")
def AnnotateHighConfidenceLabelsFromImage_UDF(image_uri):
from google.cloud import vision
client = vision.ImageAnnotatorClient()
request = {
'image': {
'source': {'image_uri': image_uri},
},
}
response = client.annotate_image(request)
high_confidence_labels = []
for la in response.label_annotations:
if float(la.score * 100) > 90.0:
high_confidence_labels.append(la.description)
if len(high_confidence_labels) < 1:
high_confidence_labels.append("No labels detected")
return str(high_confidence_labels)
df_results = df.select("original_url", "title",\
AnnotateHighConfidenceLabelsFromImage_UDF("original_url").alias("labels"))\
.cache()
#at this point, might make sense to save this table out to my hive metastore to avoid re-processing all the images
#df_results.write.saveAsTable("HighConfidenceLabelsAndImages")
df_results.show(10, truncate=False)
from pyspark.sql.functions import col
df_results.where(col("labels").contains("Bird")).show(truncate=False)
###Output
+----------------------------------------------------------------+-------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------+
|original_url |title |labels |
+----------------------------------------------------------------+-------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------+
|https://farm4.staticflickr.com/1152/532400494_d65f8b7970_o.jpg |DSC_0451 |['Organism', 'Bird'] |
|https://farm5.staticflickr.com/3711/11060890716_08737a2dd7_o.jpg|Pale-breasted spinetail brasso seco nov2013|['Branch', 'Bird', 'Twig'] |
|https://c4.staticflickr.com/1/31/48416653_90f005725b_o.jpg |Thousand Oaks as seen from Ladyface Peak |['Landscape', 'Residential area', 'Aerial photography', 'Atmospheric phenomenon', "Bird's-eye view", 'Suburb', 'Plain'] |
|https://c1.staticflickr.com/1/33/66496718_d17cac35c8_o.jpg |Canyon Lands |['Branch', 'Sky', 'Twig', 'Bird'] |
|https://c7.staticflickr.com/9/8616/16415108690_51ec731c1f_o.jpg |Sarphatipark @ De Pijp @ Amsterdam |['Body of water', 'Water', 'Vertebrate', 'Bird', 'Ducks, geese and swans', 'Pond', 'Waterway', 'Water bird', 'Channel', 'Watercourse']|
+----------------------------------------------------------------+-------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------+
|
03_Combine_DFs_and_Munge.ipynb | ###Markdown
The objectives of this notebook are:- Combine all data gathered so far- Only consider information from 2009 onwards- Drop duplicate CVEs- Remove "REJECT" entries and duplicates- Engineer features ready for machine learning algorithms- Split between train/test data. Items such as NLP text-to-number conversion and normalization can only be done after the train/test split.
###Code
import pandas as pd
import numpy as np
import glob
import os
import munge_help
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.utils.class_weight import compute_class_weight
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
#from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix,plot_roc_curve,plot_precision_recall_curve,plot_confusion_matrix, classification_report
#from plot_help import plot_confusion_matrix
from nltk.stem import WordNetLemmatizer
import utils
import xgboost as xgb
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Why lemmatization and not stemming?The goal of both stemming and lemmatization is to reduce inflectional forms and sometimes derivationally related forms of a word to a common base form.However, the two words differ in their flavor. Stemming usually refers to a crude heuristic process that chops off the ends of words in the hope of achieving this goal correctly most of the time, and often includes the removal of derivational affixes. Lemmatization usually refers to doing things properly with the use of a vocabulary and morphological analysis of words, normally aiming to remove inflectional endings only and to return the base or dictionary form of a word, which is known as the lemma. Source: https://nlp.stanford.edu/IR-book/html/htmledition/stemming-and-lemmatization-1.html
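A quick comparison for intuition; `PorterStemmer` is imported here only for this demo, and the lemmatizer assumes the NLTK WordNet corpus is available locally:
```
from nltk.stem import PorterStemmer, WordNetLemmatizer
stemmer, lemmatizer = PorterStemmer(), WordNetLemmatizer()
for word in ['vulnerabilities', 'studies', 'exploited']:
    print(word, '| stem:', stemmer.stem(word), '| lemma:', lemmatizer.lemmatize(word))
```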
###Code
nvd_data_path = os.path.join('data', 'nvdcve_combined.csv')
df_nvd = pd.read_csv(nvd_data_path)
#fix numbers that are strings
df_nvd = munge_help.infer_dtypes(df_nvd)
# from none/low/high to 0/1/2
df_nvd = munge_help.categorical_to_numerical(df_nvd)
df_nvd.head()
#assert no missing values
assert df_nvd.isna().mean().sum()== 0
###Output
_____no_output_____
###Markdown
ExploitDB Data
###Code
edb_data_path = os.path.join('data', 'exploitdb_metadata.csv')
edb_df = pd.read_csv(edb_data_path, index_col=0)
edb_df = munge_help.exploit_db_munger(edb_df)
edb_df.head()
assert edb_df.isna().mean().sum()==0
edb_df.shape
###Output
_____no_output_____
###Markdown
Exploited in Wild
###Code
wild_data_path = os.path.join('data', 'target_cve.csv')
df_target = pd.read_csv(wild_data_path)
df_target.head()
###Output
_____no_output_____
###Markdown
Combine
###Code
#join nvd and edb
df_join = pd.merge(left=df_nvd,
right=edb_df,
how='left',
left_on='ID',
right_on='CVE')
#drop right column after join
df_join = df_join.drop(columns=['CVE'])
#join next on target
df_join = pd.merge(left=df_join,
right=df_target,
how='left',
left_on='ID',
right_on='ID')
df_join.columns[df_join.isnull().any()]
###Output
_____no_output_____
###Markdown
A lot of the NaN values stem from the ExploitDB columns (CVEs with no matching exploit entry). In case of doubt we opt to fill those with zeroes.
###Code
df_join = df_join.fillna(value=0)
# TODO: only use dates 2009 and after
# TODO: text to ngram
# TODO: standardaize num values
# TODO: look at correlation coefficients
###Output
_____no_output_____
###Markdown
Cleaning up Joined DF Only published on or after 2009
###Code
#make a deep copy
df_2009 = df_join.copy(deep=True)
#get year CVE was published
df_2009['year'] = df_2009['publishedDate'].dt.year
#get only years after 2009
df_2009 = df_2009[df_2009['year']>=2009]
#drop years column
df_2009 = df_2009.drop(columns=['year'])
###Output
_____no_output_____
###Markdown
Drop duplicate CVEs
###Code
#make a deep copy
df_dup = df_2009.copy(deep=True)
#drop duplicate IDs
df_dup = df_dup.drop_duplicates(subset='ID')
print("Rows before dropping dups = {}".format(df_2009.shape[0]))
print("Rows before dropping dups = {}".format(df_dup.shape[0]))
###Output
Rows before dropping dups = 118957
Rows after dropping dups = 115798
###Markdown
Remove all CVEs whose description marks them as rejected.
###Code
#name it accept so that it contains only accepted entries after cleanup
df_accept = df_dup.copy(deep=True)
#see how many CVEs are rejected
df_accept[df_accept['description'].str.contains("REJECT")].shape
# see if we have CVEs that are rejected AND are exploited in the wild
# call this a contradiction
df_contradiction = df_accept[(df_accept['description'].str.contains("REJECT")) \
& (df_accept['in_the_wild'] == 1)]
df_contradiction.shape
df_contradiction['description']
###Output
_____no_output_____
###Markdown
Since the number of rows that are both rejected and flagged as exploited in the wild is low (5), we decide to drop these rows.
###Code
#keep only accepted entires
df_accept = df_accept[~df_accept['description'].str.contains("REJECT")]
###Output
_____no_output_____
###Markdown
Remove object columns that we won't need
###Code
obj_columns = df_accept.select_dtypes(exclude='number').columns
print("Columns that are non-numeric:\n{}".format(obj_columns))
#make a deep copy
df_num = df_accept.copy(deep=True)
#drop columns that we won't need
#note that we keep the description column
df_num = df_num.drop(columns=['ID', 'cwe_val', 'vectorString_V3',
'vectorString_V2','publishedDate', 'lastModifiedDate',
'url', 'Date'])
df_num.shape
###Output
_____no_output_____
###Markdown
Train Test Split
###Code
X = df_num.drop(columns=['in_the_wild'])
y = df_num['in_the_wild']
print(X.shape)
y.mean()
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.05,
stratify=y,
random_state=42)
#quick check that stratification worked
#check how positive in train
print("Percent explited in wild in train data... {:.3%}".format(y_train.mean()))
#check how positive in test
print("Percent explited in wild in test data... {:.3%}".format(y_test.mean()))
###Output
Percent exploited in wild in train data... 1.113%
Percent exploited in wild in test data... 1.112%
###Markdown
NLP Pipeline Feature Extraction We make use of TfidfVectorizer, which converts a collection of text documents into a matrix of TF-IDF features. This sklearn class is useful because it handles a lot of the preprocessing for us, such as lowercasing the text.
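A tiny, self-contained illustration of the TF-IDF weighting on two made-up descriptions:
```
from sklearn.feature_extraction.text import TfidfVectorizer
toy_docs = ['buffer overflow in kernel driver', 'sql injection in web application']
toy_vec = TfidfVectorizer(stop_words='english')
toy_matrix = toy_vec.fit_transform(toy_docs)
print(toy_vec.get_feature_names())
print(toy_matrix.toarray().round(2))
```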
###Code
#isolate the descriptions
description_train_raw = X_train['description']
#save the text column
utils.save_obj(obj=description_train_raw, path=os.path.join('data_processed', 'description_train_raw.pkl'))
#repeat for test data
description_test_raw = X_test['description']
#save the text column
utils.save_obj(obj=description_test_raw, path=os.path.join('data_processed', 'description_test_raw.pkl'))
#instantiate the vectorizier
# note that ngram only applies if analyzer is not callable.
vectorizer = TfidfVectorizer(encoding='utf-8',
decode_error='strict',
strip_accents='ascii', #remove accents
lowercase=True, #make everything lowercase before vectorizing
preprocessor=None,
tokenizer=None,
analyzer='word', #feature to be made of word ngrams
stop_words='english', #remove english stopwords
#token_pattern='(?u)\b\w\w+\b', #keep this one default
ngram_range=(1, 3),
max_df=.8, #if appears in more than this percent don't use it
min_df=10, #ignore terms that have a document frequency strictly lower than the given threshold
max_features=100, #arbitrary number
vocabulary=None,
binary=False,
norm='l2',
use_idf=True,
smooth_idf=True,
sublinear_tf=False)
vectorizer.fit(X_train['description'])
#see what the token pattern is
#note that it is commented out when we instantiate the vectorizer
#it could be a bug
vectorizer.token_pattern
#see what's in our vocab
vectorizer.get_feature_names()
###Output
_____no_output_____
###Markdown
Data Preprocessing preprocess train data
###Code
#instantiate scaler
scaler = MinMaxScaler(feature_range = (0,1))
#get numerical features
X_train_num = X_train.drop(columns='description')
#fit to train numerical data
scaler.fit(X_train_num)
#transform
X_train_num = scaler.transform(X_train_num)
#isolate text column
X_train_text = X_train['description']
#apply tfidf transform
X_train_text = vectorizer.transform(X_train_text)
###Output
_____no_output_____
###Markdown
preprocess test data
###Code
#get numerical features
X_test_num = X_test.drop(columns='description')
#scale test data
X_test_num = scaler.transform(X_test_num)
#isolate text column
X_test_text = X_test['description']
#apply tfidf transform
X_test_text = vectorizer.transform(X_test_text)
#check shape
X_train_num.shape
X_train_text.shape
###Output
_____no_output_____
###Markdown
How can we make tfidf vocab smaller?
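Before inspecting the idf weights below, one hypothetical way to gauge how the `min_df` cutoff affects vocabulary size (this refits the vectorizer several times, so it can take a while on the full training set):
```
# probe vocabulary size without the max_features cap (sketch)
for cutoff in (10, 25, 50, 100):
    v = TfidfVectorizer(stop_words='english', ngram_range=(1, 3), min_df=cutoff)
    v.fit(X_train['description'])
    print(cutoff, len(v.vocabulary_))
```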
###Code
#make a dataframe of idf weights
df_idf = pd.DataFrame(vectorizer.idf_, index=vectorizer.get_feature_names(), columns=['idf_weights'])
df_idf = df_idf.sort_values(by=['idf_weights'], ascending=False)
df_idf.head(10)
df_idf.tail(10)
###Output
_____no_output_____
###Markdown
Save data and artifacts
###Code
#train data
utils.save_obj(obj = X_train,
path = os.path.join('data_processed', 'X_train.pkl'))
#train data
utils.save_obj(obj = y_train,
path = os.path.join('data_processed', 'y_train.pkl'))
utils.save_obj(obj = X_train_num,
path = os.path.join('data_processed', 'X_train_num.pkl'))
utils.save_obj(obj = X_train_text,
path = os.path.join('data_processed', 'X_train_text.pkl'))
#test data
utils.save_obj(obj = X_test,
path = os.path.join('data_processed', 'X_test.pkl'))
#test data
utils.save_obj(obj = y_test,
path = os.path.join('data_processed', 'y_test.pkl'))
utils.save_obj(obj = X_test_num,
path = os.path.join('data_processed', 'X_test_num.pkl'))
utils.save_obj(obj = X_test_text,
path = os.path.join('data_processed', 'X_test_text.pkl'))
#transformers
utils.save_obj(obj = vectorizer,
path = os.path.join('artifacts', 'vectorizer_tfidf.pkl'))
utils.save_obj(obj = scaler,
path = os.path.join('artifacts', 'minmax_scaler.pkl'))
###Output
_____no_output_____
###Markdown
Q: Are numeric features alone sufficient? Logistic Regression
###Code
#instantiate with default params
lr = LogisticRegression(penalty='l2',
dual=False,
tol=0.0001,
C=1.0,
fit_intercept=True,
intercept_scaling=1,
class_weight='balanced',
random_state=None,
solver='lbfgs',
max_iter=100,
multi_class='auto',
verbose=0,
warm_start=False,
n_jobs=-1,
l1_ratio=None)
lr.fit(X_train_num, y_train)
y_lr_num = lr.predict(X_test_num)
utils.metric_evaluation(lr, X_train_num, X_test_num, y_test, 'Logistic Regression Numbers Only')
###Output
Confusion Matrix in array form
[[4396 939]
[ 19 41]]
####################
###Markdown
Naive Bayes
###Code
nb = GaussianNB(priors=None,
var_smoothing=1e-09)
nb.fit(X_train_num, y_train)
y_nb_num=nb.predict(X_test_num)
utils.metric_evaluation(nb, X_train_num, X_test_num, y_test, title = 'Naive Bayes Numbers Only')
###Output
Confusion Matrix in array form
[[ 139 5196]
[ 0 60]]
####################
###Markdown
Random forest
###Code
rf = RandomForestClassifier(n_estimators=100,
criterion='gini',
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features='sqrt',
max_leaf_nodes=None,
min_impurity_decrease=0.0,
min_impurity_split=None,
bootstrap=True,
oob_score=False,
n_jobs=-1,
random_state=None,
verbose=0,
warm_start=False,
class_weight='balanced',
ccp_alpha=0.0,
max_samples=None)
rf.fit(X_train_num, y_train)
y_rf_num = rf.predict(X_test_num)
utils.metric_evaluation(rf, X_train_num, X_test_num, y_test, title = 'Random Forest Numbers Only')
###Output
Confusion Matrix in array form
[[4811 524]
[ 29 31]]
####################
###Markdown
GBTree
###Code
gb = GradientBoostingClassifier(loss='deviance',
learning_rate=0.1,
n_estimators=100,
subsample=1.0,
criterion='friedman_mse',
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_depth=3,
min_impurity_decrease=0.0,
min_impurity_split=None,
init=None,
random_state=None,
max_features='sqrt',
verbose=0,
max_leaf_nodes=None,
warm_start=False,
presort='deprecated',
validation_fraction=0.1,
n_iter_no_change=None,
tol=0.0001,
ccp_alpha=0.0)
gb.fit(X_train_num, y_train)
y_gb_num = gb.predict(X_test_num)
utils.metric_evaluation(gb, X_train_num, X_test_num, y_test, title = 'GB Tree Numbers Only')
###Output
Confusion Matrix in array form
[[5329 6]
[ 59 1]]
####################
###Markdown
SVM
###Code
# svc = SVC( C=1.0,
# kernel='rbf',
# degree=3,
# gamma='scale',
# coef0=0.0,
# shrinking=True,
# probability=False,
# tol=0.001,
# cache_size=200,
# class_weight='balanced',
# verbose=False,
# max_iter=-1,
# decision_function_shape='ovr',
# break_ties=False,
# random_state=None)
# svc.fit(X_train_num, y_train)
# y_svc_num = svc.predict(X_test_num)
# confusion_matrix(y_test, y_svc_num)
#utils.metric_evaluation(svc, X_train_num, X_test_num, y_test)
###Output
_____no_output_____
###Markdown
XGBoostadapted from here: https://www.kdnuggets.com/2017/03/simple-xgboost-tutorial-iris-dataset.html
###Code
bst = xgb.XGBClassifier(n_estimators=100,
colsample_bytree=0.9,
eta=0.9,
max_depth=6,
num_boost_round=10,
subsample=0.9,
n_jobs=-1)
bst.fit(X_train_num, y_train)
y_xgb_num = bst.predict(X_test_num)
utils.metric_evaluation(bst, X_train_num, X_test_num, y_test, title = 'XGBoost Numbers Only')
###Output
Confusion Matrix in array form
[[5324 11]
[ 55 5]]
####################
###Markdown
Putting all the plots together
###Code
fig = plt.figure(figsize=(8,8))
ax = fig.gca()
lr_roc = plot_roc_curve(lr, X_test_num, y_test, name='LogReg', ax=ax)
nb_roc = plot_roc_curve(nb, X_test_num, y_test, name='NaiveBayes', ax=ax)
rf_roc = plot_roc_curve(rf, X_test_num, y_test, name='RandomForest', ax=ax)
gb_roc = plot_roc_curve(gb, X_test_num, y_test, name='GBTree', ax=ax)
xgb_roc = plot_roc_curve(bst, X_test_num, y_test, name='XGBoost', ax=ax)
plt.title("ROC Curve for Numerical Features Only")
fig = plt.figure(figsize=(8,8))
ax = fig.gca()
lr_prc = plot_precision_recall_curve(lr, X_test_num, y_test, name='LogReg', ax=ax)
nb_prc = plot_precision_recall_curve(nb, X_test_num, y_test, name='NaiveBayes', ax=ax)
rf_prc = plot_precision_recall_curve(rf, X_test_num, y_test, name='RandomForest', ax=ax)
gb_prc = plot_precision_recall_curve(gb, X_test_num, y_test, name='GBTree', ax=ax)
xgb_prc = plot_precision_recall_curve(bst, X_test_num, y_test, name='XGBoost', ax=ax)
ax.legend(loc='upper right')
plt.title("PRC Curve for Numerical Features")
###Output
_____no_output_____
###Markdown
A: Numerical Features Alone Probably Not Enough If we use numerical data alone, performance ranges from inaccurate to abysmal. We are unable to provide a solution that offers enough coverage of critical cases while also being efficient. Next we explore using the text data for classifiers. Q: Are NLP Features alone enough? Logistic Regression
###Code
#instantiate with default params
lr_text = LogisticRegression(penalty='l2',
dual=False,
tol=0.0001,
C=1.0,
fit_intercept=True,
intercept_scaling=1,
class_weight='balanced',
random_state=None,
solver='lbfgs',
max_iter=100,
multi_class='auto',
verbose=0,
warm_start=False,
n_jobs=-1,
l1_ratio=None)
#fit to text only features
lr_text.fit(X_train_text, y_train)
utils.metric_evaluation(lr_text,
X_train_text,
X_test_text,
y_test,
title = 'Logistic Regression NLP Only')
###Output
Confusion Matrix in array form
[[4114 1221]
[ 19 41]]
####################
###Markdown
Naive Bayes
###Code
nb_text = GaussianNB(priors=None,
var_smoothing=1e-09)
nb_text.fit(X_train_text.toarray(), y_train)
y_nb_text = nb_text.predict(X_test_text.toarray())
utils.metric_evaluation(nb_text,
X_train_text.toarray(),
X_test_text.toarray(),
y_test,
title = 'Naive Bayes NLP Only')
###Output
Confusion Matrix in array form
[[3563 1772]
[ 14 46]]
####################
###Markdown
Random Forest
###Code
rf_text = RandomForestClassifier(n_estimators=100,
criterion='gini',
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features='sqrt',
max_leaf_nodes=None,
min_impurity_decrease=0.0,
min_impurity_split=None,
bootstrap=True,
oob_score=False,
n_jobs=-1,
random_state=None,
verbose=0,
warm_start=False,
class_weight='balanced',
ccp_alpha=0.0,
max_samples=None)
rf_text.fit(X_train_text, y_train)
y_rf_text = rf_text.predict(X_test_text)
utils.metric_evaluation(rf_text,
X_train_text.toarray(),
X_test_text.toarray(),
y_test,
title = 'Random Forest NLP Only')
###Output
Confusion Matrix in array form
[[5248 87]
[ 52 8]]
####################
###Markdown
GB Tree
###Code
gb_text = GradientBoostingClassifier(loss='deviance',
learning_rate=0.1,
n_estimators=100,
subsample=1.0,
criterion='friedman_mse',
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_depth=3,
min_impurity_decrease=0.0,
min_impurity_split=None,
init=None,
random_state=None,
max_features='sqrt',
verbose=0,
max_leaf_nodes=None,
warm_start=False,
presort='deprecated',
validation_fraction=0.1,
n_iter_no_change=None,
tol=0.0001,
ccp_alpha=0.0)
gb_text.fit(X_train_text, y_train)
y_gb_text = gb_text.predict(X_test_text)
utils.metric_evaluation(gb_text,
X_train_text.toarray(),
X_test_text.toarray(),
y_test,
title = 'GB Tree NLP Only')
###Output
Confusion Matrix in array form
[[5331 4]
[ 60 0]]
####################
###Markdown
XGB
###Code
bst_text = xgb.XGBClassifier(n_estimators=100,
colsample_bytree=0.9,
eta=0.9,
max_depth=6,
num_boost_round=10,
subsample=0.9,
n_jobs=-1)
bst_text.fit(X_train_text, y_train)
utils.metric_evaluation(bst_text,
X_train_text.toarray(),
X_test_text.toarray(),
y_test,
title = 'XGB NLP Only')
fig = plt.figure(figsize=(8,8))
ax = fig.gca()
lr_roc = plot_roc_curve(lr_text, X_test_text, y_test, name='LogReg', ax=ax)
nb_roc = plot_roc_curve(nb_text, X_test_text.toarray(), y_test, name='NaiveBayes', ax=ax)
rf_roc = plot_roc_curve(rf_text, X_test_text, y_test, name='RandomForest', ax=ax)
gb_roc = plot_roc_curve(gb_text, X_test_text, y_test, name='GBTree', ax=ax)
xgb_roc = plot_roc_curve(bst_text, X_test_text, y_test, name='XGBoost', ax=ax)
plt.title("ROC Curve for NLP Features Only")
fig = plt.figure(figsize=(8,8))
ax = fig.gca()
lr_prc = plot_precision_recall_curve(lr_text, X_test_text, y_test, name='LogReg', ax=ax)
nb_prc = plot_precision_recall_curve(nb_text, X_test_text.toarray(), y_test, name='NaiveBayes', ax=ax)
rf_prc = plot_precision_recall_curve(rf_text, X_test_text, y_test, name='RandomForest', ax=ax)
gb_prc = plot_precision_recall_curve(gb_text, X_test_text, y_test, name='GBTree', ax=ax)
xgb_prc = plot_precision_recall_curve(bst_text, X_test_text, y_test, name='XGBoost', ax=ax)
ax.legend(loc='upper right')
plt.title("PRC Curve for Numerical Features")
###Output
_____no_output_____ |
notebooks/BLS model/BLS model - balance deflection.ipynb | ###Markdown
Bilayer Sonophore model: computation of balance quasi-static deflection Imports
###Code
import numpy as np
import matplotlib.pyplot as plt
from PySONIC.core import BilayerSonophore
###Output
_____no_output_____
###Markdown
Functions
###Code
def plotZeq(bls, ng_range, Q_range, fs=15):
fig, ax = plt.subplots(figsize=(15, 4))
ax.set_xlabel('$Q_m\ (nC/cm^2)$', fontsize=fs)
ax.set_ylabel('$Z_{eq}\ (nm)$', fontsize=fs)
for ng in ng_range:
ZeqQS = np.array([bls.balancedefQS(ng, Q) for Q in Q_range])
ax.plot(Q_range * 1e5, ZeqQS * 1e9, label=f'ng = {(ng * 1e22):.2f}e-22 mole')
for item in ax.get_xticklabels() + ax.get_yticklabels():
item.set_fontsize(fs)
for key in ['top', 'right']:
ax.spines[key].set_visible(False)
ax.legend(fontsize=fs, loc='center right', bbox_to_anchor=(1.8, 0.5), frameon=False)
fig.tight_layout()
return fig
###Output
_____no_output_____
###Markdown
Parameters
###Code
a = 32e-9 # in-plane radius (m)
Cm0 = 1e-2
Qm0 = -71.9e-5
bls = BilayerSonophore(a, Cm0, Qm0)
charges = np.linspace(-80, 40, 200) * 1e-5
gas = np.linspace(0.5 * bls.ng0, 2.0 * bls.ng0, 5)
###Output
_____no_output_____
###Markdown
Balance deflections
###Code
fig = plotZeq(bls, gas, charges)
###Output
_____no_output_____ |
d2l-en/tensorflow/chapter_optimization/convexity.ipynb | ###Markdown
Convexity:label:`sec_convexity`Convexity plays a vital role in the design of optimization algorithms. This is largely due to the fact that it is much easier to analyze and test algorithms in this context. In other words, if the algorithm performs poorly even in the convex setting we should not hope to see great results otherwise. Furthermore, even though the optimization problems in deep learning are generally nonconvex, they often exhibit some properties of convex ones near local minima. This can lead to exciting new optimization variants such as :cite:`Izmailov.Podoprikhin.Garipov.ea.2018`. BasicsLet us begin with the basics. SetsSets are the basis of convexity. Simply put, a set $X$ in a vector space is convex if for any $a, b \in X$ the line segment connecting $a$ and $b$ is also in $X$. In mathematical terms this means that for all $\lambda \in [0, 1]$ we have$$\lambda \cdot a + (1-\lambda) \cdot b \in X \text{ whenever } a, b \in X.$$This sounds a bit abstract. Consider the picture :numref:`fig_pacman`. The first set is not convex since there are line segments that are not contained in it. The other two sets suffer no such problem.:label:`fig_pacman`Definitions on their own are not particularly useful unless you can do something with them. In this case we can look at unions and intersections as shown in :numref:`fig_convex_intersect`. Assume that $X$ and $Y$ are convex sets. Then $X \cap Y$ is also convex. To see this, consider any $a, b \in X \cap Y$. Since $X$ and $Y$ are convex, the line segments connecting $a$ and $b$ are contained in both $X$ and $Y$. Given that, they also need to be contained in $X \cap Y$, thus proving our first theorem.:label:`fig_convex_intersect`We can strengthen this result with little effort: given convex sets $X_i$, their intersection $\cap_{i} X_i$ is convex.To see that the converse is not true, consider two disjoint sets $X \cap Y = \emptyset$. Now pick $a \in X$ and $b \in Y$. The line segment in :numref:`fig_nonconvex` connecting $a$ and $b$ needs to contain some part that is neither in $X$ nor $Y$, since we assumed that $X \cap Y = \emptyset$. Hence the line segment is not in $X \cup Y$ either, thus proving that in general unions of convex sets need not be convex.:label:`fig_nonconvex`Typically the problems in deep learning are defined on convex domains. For instance $\mathbb{R}^d$ is a convex set (after all, the line between any two points in $\mathbb{R}^d$ remains in $\mathbb{R}^d$). In some cases we work with variables of bounded length, such as balls of radius $r$ as defined by $\{\mathbf{x} | \mathbf{x} \in \mathbb{R}^d \text{ and } \|\mathbf{x}\|_2 \leq r\}$. FunctionsNow that we have convex sets we can introduce convex functions $f$. Given a convex set $X$ a function defined on it $f: X \to \mathbb{R}$ is convex if for all $x, x' \in X$ and for all $\lambda \in [0, 1]$ we have$$\lambda f(x) + (1-\lambda) f(x') \geq f(\lambda x + (1-\lambda) x').$$To illustrate this let us plot a few functions and check which ones satisfy the requirement. We need to import a few libraries.
###Code
%matplotlib inline
from d2l import tensorflow as d2l
import numpy as np
from mpl_toolkits import mplot3d
import tensorflow as tf
###Output
_____no_output_____
###Markdown
Let us define a few functions, both convex and nonconvex.
###Code
f = lambda x: 0.5 * x**2 # Convex
g = lambda x: tf.cos(np.pi * x) # Nonconvex
h = lambda x: tf.exp(0.5 * x) # Convex
x, segment = tf.range(-2, 2, 0.01), tf.constant([-1.5, 1])
d2l.use_svg_display()
_, axes = d2l.plt.subplots(1, 3, figsize=(9, 3))
for ax, func in zip(axes, [f, g, h]):
d2l.plot([x, segment], [func(x), func(segment)], axes=ax)
###Output
_____no_output_____
###Markdown
As expected, the cosine function is nonconvex, whereas the parabola and the exponential function are. Note that the requirement that $X$ is a convex set is necessary for the condition to make sense. Otherwise the outcome of $f(\lambda x + (1-\lambda) x')$ might not be well defined. Convex functions have a number of desirable properties. Jensen's InequalityOne of the most useful tools is Jensen's inequality. It amounts to a generalization of the definition of convexity:$$\begin{aligned} \sum_i \alpha_i f(x_i) & \geq f\left(\sum_i \alpha_i x_i\right) \text{ and } E_x[f(x)] & \geq f\left(E_x[x]\right),\end{aligned}$$where $\alpha_i$ are nonnegative real numbers such that $\sum_i \alpha_i = 1$. In other words, the expectation of a convex function is larger than the convex function of an expectation. To prove the first inequality we repeatedly apply the definition of convexity to one term in the sum at a time. The expectation can be proven by taking the limit over finite segments.One of the common applications of Jensen's inequality is with regard to the log-likelihood of partially observed random variables. That is, we use$$E_{y \sim P(y)}[-\log P(x \mid y)] \geq -\log P(x).$$This follows since $\int P(y) P(x \mid y) dy = P(x)$.This is used in variational methods. Here $y$ is typically the unobserved random variable, $P(y)$ is the best guess of how it might be distributed and $P(x)$ is the distribution with $y$ integrated out. For instance, in clustering $y$ might be the cluster labels and $P(x \mid y)$ is the generative model when applying cluster labels. PropertiesConvex functions have a few useful properties. We describe them as follows. Local Minima is Global MinimaIn particular, the local minima for convex functions is also the global minima. Let us assume the contrary and prove it wrong. If $x^{\ast} \in X$ is a local minimum such that there is a small positive value $p$ so that for $x \in X$ that satisfies $0 < |x - x^{\ast}| \leq p$ there is $f(x^{\ast}) < f(x)$. Assume there exists $x' \in X$ for which $f(x') < f(x^{\ast})$. According to the property of convexity, $$\begin{aligned} f(\lambda x^{\ast} + (1-\lambda) x') &\leq \lambda f(x^{\ast}) + (1-\lambda) f(x') \\ &< \lambda f(x^{\ast}) + (1-\lambda) f(x^{\ast}) \\ &< f(x^{\ast}) \\\end{aligned}$$There exists $\lambda \in [0, 1)$, $\lambda = 1 - \frac{p}{|x^{\ast} - x'|}$ for an example, so that $0 < |\lambda x^{\ast} + (1-\lambda) x' - x^{\ast}| \leq p$. However, because $f(\lambda x^{\ast} + (1-\lambda) x') < f(x^{\ast})$, this violates our local minimum statement. Therefore, there does not exist $x' \in X$ for which $f(x') < f(x^{\ast})$. The local minimum $x^{\ast}$ is also the global minimum.For instance, the function $f(x) = (x-1)^2$ has a local minimum for $x=1$, it is also the global minimum.
###Code
f = lambda x: (x-1)**2
d2l.set_figsize()
d2l.plot([x, segment], [f(x), f(segment)], 'x', 'f(x)')
###Output
_____no_output_____
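###Markdown
As a quick numerical sanity check of Jensen's inequality from above, we can pick a few nonnegative weights that sum to one and verify that $\sum_i \alpha_i f(x_i) \geq f(\sum_i \alpha_i x_i)$ for the convex $f$ just defined. The particular weights and points below are arbitrary illustrative choices.
###Code
alpha = np.array([0.2, 0.3, 0.5])   # nonnegative weights summing to one
xs = np.array([-1.0, 0.5, 2.0])     # arbitrary sample points
lhs = np.sum(alpha * f(xs))         # sum_i alpha_i f(x_i)
rhs = f(np.sum(alpha * xs))         # f(sum_i alpha_i x_i)
lhs, rhs, bool(lhs >= rhs)
###Output
_____no_output_____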
###Markdown
The fact that the local minima for convex functions is also the global minima is very convenient. It means that if we minimize functions we cannot "get stuck". Note, though, that this does not mean that there cannot be more than one global minimum or that there might even exist one. For instance, the function $f(x) = \mathrm{max}(|x|-1, 0)$ attains its minimum value over the interval $[-1, 1]$. Conversely, the function $f(x) = \exp(x)$ does not attain a minimum value on $\mathbb{R}$. For $x \to -\infty$ it asymptotes to $0$, however there is no $x$ for which $f(x) = 0$. Convex Functions and SetsConvex functions define convex sets as *below-sets*. They are defined as$$S_b := \{x | x \in X \text{ and } f(x) \leq b\}.$$Such sets are convex. Let us prove this quickly. Remember that for any $x, x' \in S_b$ we need to show that $\lambda x + (1-\lambda) x' \in S_b$ as long as $\lambda \in [0, 1]$. But this follows directly from the definition of convexity since $f(\lambda x + (1-\lambda) x') \leq \lambda f(x) + (1-\lambda) f(x') \leq b$.Have a look at the function $f(x, y) = 0.5 x^2 + \cos(2 \pi y)$ below. It is clearly nonconvex. The level sets are correspondingly nonconvex. In fact, they are typically composed of disjoint sets.
###Code
x, y = tf.meshgrid(
tf.linspace(-1.0, 1.0, 101), tf.linspace(-1.0, 1.0, 101))
z = x**2 + 0.5 * tf.cos(2 * np.pi * y)
# Plot the 3D surface
d2l.set_figsize((6, 4))
ax = d2l.plt.figure().add_subplot(111, projection='3d')
ax.plot_wireframe(x, y, z, **{'rstride': 10, 'cstride': 10})
ax.contour(x, y, z, offset=-1)
ax.set_zlim(-1, 1.5)
# Adjust labels
for func in [d2l.plt.xticks, d2l.plt.yticks, ax.set_zticks]:
func([-1, 0, 1])
###Output
_____no_output_____
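###Markdown
To make the below-set discussion concrete, the cell below evaluates the below-set $S_b = \{x \mid f(x) \leq b\}$ on a grid for the convex parabola and for the nonconvex cosine used earlier. For the parabola the qualifying grid points form one contiguous interval, while for the cosine they split into disjoint pieces. The threshold $b = 0.5$ and the grid spacing are arbitrary illustrative choices.
###Code
xs = np.arange(-2, 2, 0.01)
below_f = xs[0.5 * xs**2 <= 0.5]          # below-set of the convex parabola
below_g = xs[np.cos(np.pi * xs) <= 0.5]   # below-set of the nonconvex cosine
# contiguous grid points are 0.01 apart; a larger gap means the set is disconnected
bool(np.all(np.diff(below_f) < 0.02)), bool(np.all(np.diff(below_g) < 0.02))
###Output
_____no_output_____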
###Markdown
Derivatives and ConvexityWhenever the second derivative of a function exists it is very easy to check for convexity. All we need to do is check whether $\partial_x^2 f(x) \succeq 0$, i.e., whether all of its eigenvalues are nonnegative. For instance, the function $f(\mathbf{x}) = \frac{1}{2} \|\mathbf{x}\|^2_2$ is convex since $\partial_{\mathbf{x}}^2 f = \mathbf{1}$, i.e., its derivative is the identity matrix.The first thing to realize is that we only need to prove this property for one-dimensional functions. After all, in general we can always define some function $g(z) = f(\mathbf{x} + z \cdot \mathbf{v})$. This function has the first and second derivatives $g' = (\partial_{\mathbf{x}} f)^\top \mathbf{v}$ and $g'' = \mathbf{v}^\top (\partial^2_{\mathbf{x}} f) \mathbf{v}$ respectively. In particular, $g'' \geq 0$ for all $\mathbf{v}$ whenever the Hessian of $f$ is positive semidefinite, i.e., whenever all of its eigenvalues are greater equal than zero. Hence back to the scalar case.To see that $f''(x) \geq 0$ for convex functions we use the fact that$$\frac{1}{2} f(x + \epsilon) + \frac{1}{2} f(x - \epsilon) \geq f\left(\frac{x + \epsilon}{2} + \frac{x - \epsilon}{2}\right) = f(x).$$Since the second derivative is given by the limit over finite differences it follows that$$f''(x) = \lim_{\epsilon \to 0} \frac{f(x+\epsilon) + f(x - \epsilon) - 2f(x)}{\epsilon^2} \geq 0.$$To see that the converse is true we use the fact that $f'' \geq 0$ implies that $f'$ is a monotonically increasing function. Let $a < x < b$ be three points in $\mathbb{R}$. We use the mean value theorem to express$$\begin{aligned}f(x) - f(a) & = (x-a) f'(\alpha) \text{ for some } \alpha \in [a, x] \text{ and } \\f(b) - f(x) & = (b-x) f'(\beta) \text{ for some } \beta \in [x, b].\end{aligned}$$By monotonicity $f'(\beta) \geq f'(\alpha)$, hence$$\begin{aligned} f(b) - f(a) & = f(b) - f(x) + f(x) - f(a) \\ & = (b-x) f'(\beta) + (x-a) f'(\alpha) \\ & \geq (b-a) f'(\alpha).\end{aligned}$$By geometry it follows that $f(x)$ is below the line connecting $f(a)$ and $f(b)$, thus proving convexity. We omit a more formal derivation in favor of a graph below.
###Code
f = lambda x: 0.5 * x**2
x = tf.range(-2, 2, 0.01)
axb, ab = tf.constant([-1.5, -0.5, 1]), tf.constant([-1.5, 1])
d2l.set_figsize()
d2l.plot([x, axb, ab], [f(x) for x in [x, axb, ab]], 'x', 'f(x)')
d2l.annotate('a', (-1.5, f(-1.5)), (-1.5, 1.5))
d2l.annotate('b', (1, f(1)), (1, 1.5))
d2l.annotate('x', (-0.5, f(-0.5)), (-1.5, f(-0.5)))
###Output
_____no_output_____ |
mysite/static/ipython/4.MS-Face.ipynb | ###Markdown
**Face API** Microsoft Face API **1 Getting an MS API key** https://azure.microsoft.com/en-gb/try/cognitive-services/my-apis/?apiSlug=face-api&country=Korea&allowContact=true&fromLogin=True
###Code
key1 = ""
key2 = ""
###Output
_____no_output_____
###Markdown
**2 Analyzing the Response object** **Python Document** https://docs.microsoft.com/en-gb/azure/cognitive-services/face/quickstarts/python
###Code
response = [
{
"faceId": "35102aa8-4263-4139-bfd6-185bb0f52d88",
"faceRectangle": {
"top": 208,
"left": 228,
"width": 91,
"height": 91
},
"faceAttributes": {
"smile": 1,
"headPose": {
"pitch": 0,
"roll": 4.3,
"yaw": -0.3
},
"gender": "female",
"age": 27,
"facialHair": {
"moustache": 0,
"beard": 0,
"sideburns": 0
},
"glasses": "NoGlasses",
"emotion": {
"anger": 0,
"contempt": 0,
"disgust": 0,
"fear": 0,
"happiness": 1,
"neutral": 0,
"sadness": 0,
"surprise": 0
},
"blur": {
"blurLevel": "low",
"value": 0
},
"exposure": {
"exposureLevel": "goodExposure",
"value": 0.65
},
"noise": {
"noiseLevel": "low",
"value": 0
},
"makeup": {
"eyeMakeup": True,
"lipMakeup": True
},
"accessories": [],
"occlusion": {
"foreheadOccluded": False,
"eyeOccluded": False,
"mouthOccluded": False
},
"hair": {
"bald": 0.06,
"invisible": False,
"hairColor": [
{
"color": "brown",
"confidence": 1
},
{
"color": "blond",
"confidence": 0.5
},
{
"color": "black",
"confidence": 0.34
},
{
"color": "red",
"confidence": 0.32
},
{
"color": "gray",
"confidence": 0.14
},
{
"color": "other",
"confidence": 0.03
}
]
}
}
},
{
"faceId": "42502166-31bb-4ac8-81c0-a7adcb3b3e70",
"faceRectangle": {
"top": 109,
"left": 125,
"width": 79,
"height": 79
},
"faceAttributes": {
"smile": 1,
"headPose": {
"pitch": 0,
"roll": 1.7,
"yaw": 2.1
},
"gender": "male",
"age": 32,
"facialHair": {
"moustache": 0.4,
"beard": 0.4,
"sideburns": 0.4
},
"glasses": "NoGlasses",
"emotion": {
"anger": 0,
"contempt": 0,
"disgust": 0,
"fear": 0,
"happiness": 1,
"neutral": 0,
"sadness": 0,
"surprise": 0
},
"blur": {
"blurLevel": "low",
"value": 0.11
},
"exposure": {
"exposureLevel": "goodExposure",
"value": 0.74
},
"noise": {
"noiseLevel": "low",
"value": 0
},
"makeup": {
"eyeMakeup": False,
"lipMakeup": True
},
"accessories": [],
"occlusion": {
"foreheadOccluded": False,
"eyeOccluded": False,
"mouthOccluded": False
},
"hair": {
"bald": 0.02,
"invisible": False,
"hairColor": [
{
"color": "brown",
"confidence": 1
},
{
"color": "blond",
"confidence": 0.94
},
{
"color": "red",
"confidence": 0.76
},
{
"color": "gray",
"confidence": 0.2
},
{
"color": "other",
"confidence": 0.03
},
{
"color": "black",
"confidence": 0.01
}
]
}
}
}
]
# count the number of objects (faces) in the response
len(response)
# extract the keys of the first object
response[0].keys()
face_key = list(response[0].keys())[2]
print(face_key)
response[0][face_key].keys()
response[0][face_key]['emotion']
###Output
_____no_output_____
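###Markdown
The same attributes can also be collected into one table covering every detected face. This is a minimal sketch that uses only the fields shown in the response above (pandas is assumed to be available):
###Code
import pandas as pd
rows = []
for face in response:
    fa = face["faceAttributes"]
    row = {"faceId": face["faceId"], "gender": fa["gender"], "age": fa["age"]}
    row.update(fa["emotion"])   # one column per emotion score
    rows.append(row)
pd.DataFrame(rows)
###Output
_____no_output_____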
###Markdown
**3 Following the Python example** **Python Document** https://docs.microsoft.com/en-gb/azure/cognitive-services/face/quickstarts/python
###Code
# specify the image to analyze
image_url = 'https://how-old.net/Images/faces2/main007.jpg'
# plug in the API key obtained above
subscription_key = "Enter your subscription key here"
assert subscription_key
# first check that the modules needed for the API calls below are available
%matplotlib inline
import requests
import matplotlib.pyplot as plt
from PIL import Image
from matplotlib import patches
from io import BytesIO
# call the MS Face API
face_api_url = 'https://westcentralus.api.cognitive.microsoft.com/face/v1.0/detect'
headers = {'Ocp-Apim-Subscription-Key': subscription_key}
params = {
'returnFaceId': 'true',
'returnFaceLandmarks': 'false',
'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,' +
'emotion,hair,makeup,occlusion,accessories,blur,exposure,noise'
}
data = {'url': image_url}
response = requests.post(face_api_url, params=params, headers=headers, json=data)
faces = response.json()
image = Image.open(BytesIO(requests.get(image_url).content))
plt.figure(figsize=(8, 8))
ax = plt.imshow(image, alpha=0.6)
for face in faces:
fr = face["faceRectangle"]
fa = face["faceAttributes"]
origin = (fr["left"], fr["top"])
p = patches.Rectangle(
origin, fr["width"], fr["height"], fill=False, linewidth=2, color='b')
ax.axes.add_patch(p)
plt.text(origin[0], origin[1], "%s, %d"%(fa["gender"].capitalize(), fa["age"]),
fontsize=20, weight="bold", va="bottom")
_ = plt.axis("off")
###Output
_____no_output_____
###Markdown
**3 Extracting only the emotion data** Remove the Matplotlib parts from the source above https://docs.microsoft.com/en-gb/azure/cognitive-services/face/quickstarts/python
###Code
# call the MS Face API
face_api_url = 'https://westcentralus.api.cognitive.microsoft.com/face/v1.0/detect'
headers = {'Ocp-Apim-Subscription-Key': subscription_key}
params = {
'returnFaceId': 'true',
'returnFaceLandmarks': 'false',
'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,' +
'emotion,hair,makeup,occlusion,accessories,blur,exposure,noise'
}
data = {'url': image_url}
response = requests.post(face_api_url, params=params, headers=headers, json=data)
faces = response.json()
image = Image.open(BytesIO(requests.get(image_url).content))
# plt.figure(figsize=(8, 8))
# ax = plt.imshow(image, alpha=0.6)
emotion, people = [], []
for face in faces:
fr = face["faceRectangle"]
fa = face["faceAttributes"]
origin = (fr["left"], fr["top"])
p = patches.Rectangle(
origin, fr["width"], fr["height"], fill=False, linewidth=2, color='b')
    # extract the Emotion API results
people.append("[" + fa["gender"].capitalize() + ':' + str(int(fa["age"])) + "]")
emotion.append(fa['emotion'])
# ax.axes.add_patch(p)
# plt.text(origin[0], origin[1], "%s, %d"%(fa["gender"].capitalize(), fa["age"]),
# fontsize=20, weight="bold", va="bottom")
# _ = plt.axis("off")
people
emotion
# print the emotion analysis results as text
result_str = ''
import pandas as pd
# use Pandas to sort each face's emotion scores in descending order
for count, faceapi in enumerate(emotion):
face_se = pd.Series(faceapi)
face_se = face_se.sort_values(ascending=False)
result_str += " " + people[count] + " "
for no in range(len(face_se)):
result_str += face_se.index[no]
result_str += " : " + str(face_se[no]) + " "
result_str
###Output
_____no_output_____
###Markdown
**4 Building an emotion function** Wrap the output above into a function that can be reused from Django https://docs.microsoft.com/en-gb/azure/cognitive-services/face/quickstarts/python
###Code
# try : run this block
# except : what to do when an error occurs while the block runs
for i in ['Korea', 'Japan', 'USA']:
    try:
        i /= i
    except:
        print('+ this object does not support the operation')
    i
# extract person information using the MS Face API
def faceapi(image_url):
try:
import requests
        subscription_key = "Enter your subscription key here"
assert subscription_key
face_api_url = 'https://westcentralus.api.cognitive.microsoft.com/face/v1.0/detect'
headers = {'Ocp-Apim-Subscription-Key': subscription_key}
params = {'returnFaceId': 'true',
'returnFaceLandmarks': 'false',
'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,' +
'emotion,hair,makeup,occlusion,accessories,blur,exposure,noise'}
data = {'url': image_url}
response = requests.post(face_api_url, params=params, headers=headers, json=data)
faces = response.json()
emotion, people = [], []
for face in faces:
fa = face["faceAttributes"]
people.append("[" + fa["gender"].capitalize() + ':' + str(int(fa["age"])) + "]")
            emotion.append(fa['emotion'])
        # print the emotion analysis results as text
result_str = ''
import pandas as pd
for count, faceapi in enumerate(emotion):
face_se = pd.Series(faceapi)
face_se = face_se.sort_values(ascending=False)
result_str += " " + people[count] + " "
for no in range(len(face_se)):
result_str += face_se.index[no]
result_str += " : " + str(face_se[no]) + " "
except:
        result_str = 'The image is not suitable for analysis.'
return result_str
###Output
_____no_output_____ |
week3/Regex for Text Processing (In-Class).ipynb | ###Markdown
Regex IntroductionUse the [regexr.com](regexr.com) to practice and hone your regular expressions before applying them in Python.
###Code
import re # standard Python library for text regular expression parsing
SAMPLE_TWEET = '''
#wolfram Alpha SUCKS! Even for researchers the information provided is less than you can get from
#google or #wikipedia, totally useless!"
'''
###Output
_____no_output_____
###Markdown
`re.match` searches starting at the beginning of the string, while `re.search` searches the entire string. Match the first time a capital letter appears in the tweet
###Code
match = re.search("[A-Z]", SAMPLE_TWEET)  # first capital letter in the tweet
match.group()
###Output
_____no_output_____
###Markdown
Match all capital letters that appears in the tweet
###Code
re.findall("[A-Z]", SAMPLE_TWEET)
###Output
_____no_output_____
###Markdown
Match all words that are at least 3 characters long
###Code
re.findall("[a-zA-Z0-9]{3,}", SAMPLE_TWEET)
###Output
_____no_output_____
###Markdown
Match all hashtags in the tweet
###Code
re.findall("#[a-zA-Z0-9]+", SAMPLE_TWEET)
###Output
_____no_output_____
###Markdown
Match all hashtags in the tweets, capture only the text of the hashtag
###Code
# capturing groups
re.findall("#([\w]+)", SAMPLE_TWEET)
###Output
_____no_output_____
###Markdown
Match all words that start with `t`, and are followed by `h` or `o`
###Code
re.findall("(?:th|to)\w*", SAMPLE_TWEET)
###Output
_____no_output_____
###Markdown
Match all words that end a sentence
###Code
re.findall("(\w+)(\.|\?|\!)", SAMPLE_TWEET)
###Output
_____no_output_____
###Markdown
Match word boundary*A thorough examination of the movie shows Thor was a thorn in the side of the villains. Thor.*```pythonre.findall("\b[tT]hor\b", SAMPLE_TWEET)``` How to Handle When the Regex Does Not Match?
###Code
SAMPLE_TWEET = "A thorough examination of the movie shows Thor was a thorn in the side of the villains. Thor."
re.findall("\b[tT]hor\b", SAMPLE_TWEET)
mylist = "ASdad"
if re.findall("\\bThor\\b", SAMPLE_TWEET):
print("Found")
else:
print("Not found")
###Output
Found
###Markdown
Using Regex Combined with Pandas
###Code
import pandas as pd
# load in dataframe
# get rid of some columns we don't care about
# preview the data
# get length of tweets in characters
# count number of times Obama appears in tweets
# find all the @s in the tweets
# Mon May 11 03:17:40 UTC 2009
# get the weekday of tweet
# get the month of the tweet
# get the year of the tweet
###Output
_____no_output_____
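###Markdown
For reference, the outline in the previous cell maps onto pandas string methods roughly as follows. This is only a sketch: the file name `tweets.csv` and the column names `text` and `date` are hypothetical placeholders, not the actual in-class dataset.
###Code
df = pd.read_csv("tweets.csv")                      # hypothetical file name
df["tweet_len"] = df["text"].str.len()              # length of tweets in characters
df["obama_mentions"] = df["text"].str.count("Obama")
df["ats"] = df["text"].str.findall(r"@\w+")         # all the @s in the tweets
# date format assumed: Mon May 11 03:17:40 UTC 2009
df["weekday"] = df["date"].str.extract(r"^(\w{3})")
df["month"] = df["date"].str.extract(r"^\w{3} (\w{3})")
df["year"] = df["date"].str.extract(r"(\d{4})$")
df.head()
###Output
_____no_output_____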
###Markdown
Exercises (15 minutes)1. Identify the list of email addresses for your security administrator to blacklist from your company's email servers.2. Identify any IP addresses that should be blacklisted (an IPv4 address goes from **1.1.1.1 to 255.255.255.255**)3. Find a sensible way to identify all names of individuals in the spam emails.3. Find all hashtags mentioned in the tweets dataset. Store it as a separate column called **hashtags**.
###Code
# 1 Identify the list of email addresses for your security administrator to blacklist from your company's email servers.
# 2 Identify any IP addresses that should be blacklisted (an IPv4 address goes from **1.1.1.1 to 255.255.255.255**)
# 3 Find a sensible way to identify all names of individuals in the spam emails.
# 4 Find all hashtags mentioned in the tweets dataset. Store it as a separate column called **hashtags**.
###Output
_____no_output_____ |
DataScience/3.Pandas/My_Practice.ipynb | ###Markdown
Pandas Series
###Code
import numpy as np
import pandas as pd
labels = ['a', 'b', 'c']
my_data = [10, 20, 30]
arr = np.array(my_data)
d = {'a': 10, 'b': 20, 'c': 30}
pd.Series(data = my_data)
pd.Series(data = my_data, index=labels)
pd.Series(my_data, labels)
pd.Series(arr, labels)
pd.Series(d)
pd.Series(data=labels)
pd.Series(data = [sum, print, len])
ser1 = pd.Series([1, 2, 3, 4], ['USA', 'GERMANY', 'JAPAN', 'INDIA'])
ser1
ser2 = pd.Series([1, 2, 3, 4], ['USA', 'GERMANY', 'ITALY', 'INDIA'])
ser2
ser1['USA']
ser1 + ser2
###Output
_____no_output_____
###Markdown
DataFrames A DataFrame is a bunch of series that shares common index
###Code
from numpy.random import randn
np.random.seed(101)
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
df
df['W']
type(df['W'])
type(df)
df.W #Not recommended as we may get confused
df[['W', 'Z']]
df['new'] = df['W'] + df['X']
df['new']
df
df.drop('new', axis = 1)
df
df.drop('new', axis = 1, inplace=True)
df
df.drop('E')
df
df.shape
# Accessing the Row values
df.loc['C'] #for location or labeled based index
df.iloc[2] #for numerical based index
df.loc['A', 'X']
df.loc[['A', 'C'],['Z', 'Y']]
df > 0
booldf = df > 0
df[booldf]
df[df > 0]
df['W'] > 0
df[df['W'] > 0]
df[df['Z'] < 0]
resultantdf = df[df['W'] > 0]
resultantdf
resultantdf['Y']
df[df['W'] > 0][['Y', 'X']]
boolSer = df['W'] > 0
boolSer
result = df[boolSer]
result
mycols = ['Y', 'X']
result[mycols]
df[(df['W']>0) and (df['X'] > 0)] # and operator cann't compare a series of boolean values to another boolean values. It can compare only a boolean to another boolean
True and False
df[(df['W'] > 0) & ( df['X'] > 0)]
df[(df['W'] > 0) | ( df['Y'] > 1)]
df
df.reset_index() # It will reset the row indices to integers starting from 0 but it will not affect the DataFrame. To modify it in place, use df.reset_index(inplace=True)
df
df['States'] = 'CA IN US CO RA'.split()
df
df.set_index('States') # To set one of the column as row indices. Note: you can use inplace='True' argument to overwrite the original.
#Index levels
outside = ['G1', 'G1', 'G1','G2', 'G2', 'G2']
inside = [1, 2, 3, 1, 2, 3]
hier_index = list(zip(outside, inside))
hier_index = pd.MultiIndex.from_tuples(hier_index)
hier_index
df = pd.DataFrame(randn(6, 2), hier_index, ['A', 'B'])
df
df.loc['G1']
df.loc['G1'].loc[1]
df.index.names
df.index.names = ['Groups', 'Num']
df
df.loc['G2'].loc[3]['B']
df.xs('G2') #It is cross-section
df.xs(2, level='Num') #It is used to get the values of any level which is harder using loc method
###Output
_____no_output_____
###Markdown
Missing Data
###Code
dd = {'A': [1, 4, np.nan], 'B': [7, np.nan, np.nan], 'C': [1, 2, 3]}
dff = pd.DataFrame(dd)
dff
dff.dropna() #It will drop rows having the null values.Note: you can use inplace='True' argument to overwrite the original.
dff.dropna(axis=1) #It will drop columns having the null values.Note: you can use inplace='True' argument to overwrite the original.
dff
dff.dropna(thresh=2) # Keeps only the rows with at least thresh (here 2) non-null values and drops the rest
dff.fillna('FILL VALUE')
dff
dff['A'].fillna(dff['A'].mean())  # fill missing values in column A with that column's mean
dff
###Output
_____no_output_____
###Markdown
Groupby - Groupby allows you to group together rows based off of a column and perform an aggregate function on them
###Code
dataa = {'Company':['GOOG','GOOG','MSFT','MSFT','FB','FB'],
'Person':['Sam','Charlie','Amy','Vanessa','Carl','Sarah'],
'Sales':[200,120,340,124,243,350]}
dfff = pd.DataFrame(dataa)
dfff
dfff.groupby('Company')
byComp = dfff.groupby('Company')
byComp
byComp.mean() #As mean is the numeric expression It will ignore the non-numeric columns.
byComp.sum()
byComp.std()
byComp.sum().loc['FB']
dfff.groupby('Company').sum().loc['FB']
dfff.groupby('Company').count()
dfff.groupby('Company').min()
dfff.groupby('Company').describe()
dfff.groupby('Company').describe().transpose()
dfff.groupby('Company').describe().transpose()['FB']
###Output
_____no_output_____
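###Markdown
Besides the single aggregations above, `groupby` also accepts `.agg()` with a list of functions, which is a common pattern. A minimal sketch on the same data:
###Code
dfff.groupby('Company')['Sales'].agg(['mean', 'min', 'max'])
###Output
_____no_output_____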
###Markdown
Merging, Joining, and Concatenating Concatenating
###Code
df1 = pd.DataFrame({'A':['A0', 'A1', 'A2', 'A3'],
'B':['B0', 'B1', 'B2', 'B3'],
'C':['C0', 'C1', 'C2', 'C3'],
'D':['D0', 'D1', 'D2', 'D3']},
index=[0,1,2,3])
df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'],
'B': ['B4', 'B5', 'B6', 'B7'],
'C': ['C4', 'C5', 'C6', 'C7'],
'D': ['D4', 'D5', 'D6', 'D7']},
index=[4, 5, 6, 7])
df3 = pd.DataFrame({'A': ['A8', 'A9', 'A10', 'A11'],
'B': ['B8', 'B9', 'B10', 'B11'],
'C': ['C8', 'C9', 'C10', 'C11'],
'D': ['D8', 'D9', 'D10', 'D11']},
index=[8, 9, 10, 11])
df1
df2
df3
pd.concat([df1, df2, df3])
pd.concat([df1, df2, df3], axis=1)
###Output
_____no_output_____
###Markdown
Merging
###Code
left = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3'],
'key': ['K0', 'K1', 'K2', 'K3']})
right = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']})
left
right
pd.merge(left, right, how='inner', on='key')
left2 = pd.DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'],
'key2': ['K0', 'K1', 'K0', 'K1'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']})
right2 = pd.DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'],
'key2': ['K0', 'K0', 'K0', 'K0'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']})
pd.merge(left2, right2, on=['key1', 'key2'])
pd.merge(left2, right2, how='outer', on=['key1', 'key2'])
pd.merge(left2, right2, how='right', on=['key1', 'key2'])
pd.merge(left2, right2, how='left', on=['key1', 'key2'])
###Output
_____no_output_____
###Markdown
Joining
###Code
left3 = pd.DataFrame({'A': ['A0', 'A1', 'A2'],
'B': ['B0', 'B1', 'B2']},
index=['K0', 'K1', 'K2'])
right3 = pd.DataFrame({'C': ['C0', 'C2', 'C3'],
'D': ['D0', 'D2', 'D3']},
index=['K0', 'K2', 'K3'])
left3.join(right3)
left3.join(right3, how='outer')
left3.join(right3, how='inner')
left3.join(right3, how='right')
import pandas as pd
df4 = pd.DataFrame({'col1':[1,2,3,4],'col2':[444,555,666,444],'col3':['abc','def','ghi','xyz']})
df4.head()
df4['col2'].unique()
df4['col2'].nunique()
df4['col2'].value_counts()
df4[(df4['col2'] == 444) & (df4['col1']> 2)]
def power2(x):
return x ** 2
power2(4)
df4['col1'].apply(power2)
df4['col2'].apply(lambda x: x + 5)
df4['col3'].apply(len)
df4.columns
df4.index
df4.sort_values('col2')
df4.sort_values(by = 'col2')
df4.isnull()
data3 = {'A':['foo','foo','foo','bar','bar','bar'],
'B':['one','one','two','two','one','one'],
'C':['x','y','x','y','x','y'],
'D':[1,3,2,5,4,1]}
df5 = pd.DataFrame(data3)
df5
df5.pivot_table(values='D',index=['A', 'B'],columns=['C'])
###Output
_____no_output_____
###Markdown
Data Input and Output - sqlalchemy for SQL files- lxml for XML and HTML files- html5lib for HTML files- BeautifulSoup4 for HTML files- Pandas can read CSV files - CSV- Excel- HTML- SQL
###Code
pwd
pd.read_csv('example.csv')
df6 = pd.read_csv('example.csv')
df6
df6.to_csv('my_output')
pd.read_csv('my_output')
df6.to_csv('my_output', index=False)
pd.read_csv('my_output')
pd.read_excel('Excel_Sample.xlsx', sheet_name='Sheet1')
df6.to_excel('Excel_Sample2.xlsx', sheet_name='NewSheet')
df7 = pd.read_html('http://www.fdic.gov/bank/individual/failed/banklist.html')
df7[0]
df7[0].head()
from sqlalchemy import create_engine
engine = create_engine('sqlite:///:memory:')
df6.to_sql('my_table', engine)
sqldf = pd.read_sql('my_table', con=engine)
sqldf
###Output
_____no_output_____ |
Section-12-Engineering-Date-Time/12.02_Engineering_time.ipynb | ###Markdown
Engineering TimeIn this demo, we are going to extract different ways of representing time from a timestamp. We can extract for example:- hour- minute- second- date- elapsed timeWe will create a toy dataset for the demonstration.
###Code
import pandas as pd
import numpy as np
import datetime
# let's create a toy data set: 1 column 7 different timestamps,
# 1 hr difference between timestamp
date = pd.Series(pd.date_range('2015-1-5 11:20:00', periods=7, freq='H'))
df = pd.DataFrame(dict(date=date))
df
###Output
_____no_output_____
###Markdown
Extract the hr, minute and second
###Code
df['hour'] = df['date'].dt.hour
df['min'] = df['date'].dt.minute
df['sec'] = df['date'].dt.second
df
###Output
_____no_output_____
###Markdown
Extract time part
###Code
df['time'] = df['date'].dt.time
df
###Output
_____no_output_____
###Markdown
Extract hr, min, sec, at the same time
###Code
# now let's repeat what we did in cell 3 in 1 command
df[['h','m','s']] = pd.DataFrame([(x.hour, x.minute, x.second) for x in df['time']])
df
###Output
_____no_output_____
###Markdown
Calculate time difference
###Code
# let's create another toy dataframe with 2 timestamp columns
# and 7 rows each, in the first column the timestamps change monthly,
# in the second column the timestamps change weekly
date1 = pd.Series(pd.date_range('2012-1-1 12:00:00', periods=7, freq='M'))
date2 = pd.Series(pd.date_range('2013-3-11 21:45:00', periods=7, freq='W'))
df = pd.DataFrame(dict(Start_date = date1, End_date = date2))
df
# let's calculate the time elapsed in seconds
df['diff_seconds'] = df['End_date'] - df['Start_date']
df['diff_seconds']=df['diff_seconds']/np.timedelta64(1,'s')
df
# let's calculate the time elapsed in minutes
df['diff_minutes'] = df['End_date'] - df['Start_date']
df['diff_minutes'] = df['diff_minutes']/np.timedelta64(1,'m')
df
###Output
_____no_output_____
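###Markdown
The same pattern extends to other units, for example hours, or whole days via the `dt` accessor (a small extension of the cells above):
###Code
# let's calculate the time elapsed in hours and in days
df['diff_hours'] = (df['End_date'] - df['Start_date']) / np.timedelta64(1, 'h')
df['diff_days'] = (df['End_date'] - df['Start_date']).dt.days
df
###Output
_____no_output_____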
###Markdown
For more details visit [this article](http://www.datasciencemadesimple.com/difference-two-timestamps-seconds-minutes-hours-pandas-python-2/) Work with different timezonesIn the next few cells, we will see how to work with timestamps that are in different time zones.
###Code
# first, let's create a toy dataframe with some timestamps in different time zones
df = pd.DataFrame()
df['time'] = pd.concat([
pd.Series(
pd.date_range(
start='2014-08-01 09:00', freq='H', periods=3,
tz='Europe/Berlin')),
pd.Series(
pd.date_range(
start='2014-08-01 09:00', freq='H', periods=3, tz='US/Central'))
], axis=0)
df
###Output
_____no_output_____
###Markdown
We can see the different timezones indicated by the +2 and -5, respect to the meridian.
###Code
# to work with different time zones, first we unify the timezone to the central one
# setting utc = True
df['time_utc'] = pd.to_datetime(df['time'], utc=True)
# next we change all timestamps to the desired timezone, eg Europe/London
# in this example
df['time_london'] = df['time_utc'].dt.tz_convert('Europe/London')
df
###Output
_____no_output_____ |
docs/nb/DM_Halos and DM_IGM.ipynb | ###Markdown
DM_Halos and DM_IGMSplitting $\langle DM_{cosmic}\rangle$ into its constituents.
###Code
# imports
from importlib import reload
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as IUS
from astropy import units as u
from frb.halos import ModifiedNFW
from frb import halos as frb_halos
from frb import igm as frb_igm
from frb.figures import utils as ff_utils
from matplotlib import pyplot as plt
plt.rcParams['font.size'] = 17
###Output
_____no_output_____
###Markdown
$\langle \rho_{diffuse, cosmic}\rangle$Use `f_diffuse` to calculate the average mass fraction of diffuse gas and diffuse gas density (physical). Math described in [DM_cosmic.ipynb](DM_cosmic.ipynb).
###Code
help(frb_igm.f_diffuse)
# Define redshifts
zvals = np.linspace(0, 8)
# Get <n_e>
f_diffuse, rho_diffuse = frb_igm.f_diffuse(zvals, return_rho = True)
# Plot
fig, axs = plt.subplots(2,1, sharex=True, figsize = (8,7))
fig.tight_layout()
ax1 = axs[0]
ax1.plot(zvals, f_diffuse, lw=2)
ax1.set_ylabel(r'$\langle f_{diffuse, cosmic}\rangle$')
ax2 = axs[1]
ax2.plot(zvals, rho_diffuse.to('Msun*Mpc**-3'), lw=2)
ax2.set_yscale("log")
ax2.set_xlabel('z')
ax2.set_ylabel(r'$\langle \rho_{diffuse, cosmic}\rangle$ $M_\odot~Mpc^{-3}$')
plt.show()
###Output
_____no_output_____
###Markdown
$\langle n_{e,cosmic}\rangle$
###Code
help(frb_igm.ne_cosmic)
# Define redshifts
zvals = np.linspace(0, 8)
# Get <n_e>
avg_ne = frb_igm.ne_cosmic(zvals)
# Visualize
fig = plt.figure(figsize = (10, 6))
plt.plot(zvals, avg_ne, label=r'$\langle n_{e, cosmic}\rangle$', lw=2)
plt.yscale("log")
plt.legend(loc = "upper left")
plt.xlabel('z')
plt.ylabel(r'$\langle n_{e, cosmic}\rangle$ [$cm^{-3}$]')
plt.show()
###Output
_____no_output_____
###Markdown
$\langle DM_{cosmic}\rangle$See [DM_cosmic.ipynb](DM_cosmic.ipynb) for details regarding its computation.
###Code
help(frb_igm.average_DM)
DM_cosmic, zvals = frb_igm.average_DM(8, cumul=True)
# Visualize
fig = plt.figure(figsize = (10, 6))
plt.plot(zvals, DM_cosmic, lw=2)
plt.xlabel('z')
plt.ylabel(r'$\langle DM_{cosmic}\rangle$ $pc~cm^{-3}$')
plt.show()
###Output
_____no_output_____
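###Markdown
As a quick consistency check on the curve above, we can interpolate $\langle DM_{cosmic}\rangle$ at a low redshift and compare it with the commonly quoted rule of thumb of roughly $1000\,z$ $pc~cm^{-3}$. The test redshift below is an arbitrary choice, and `DM_cosmic` is assumed to be an astropy Quantity (as the units in the plot suggest), hence the `.value`.
###Code
z_test = 0.5
np.interp(z_test, zvals, DM_cosmic.value), 1000 * z_test
###Output
_____no_output_____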
###Markdown
$\langle DM_{halos}\rangle$ and $\langle DM_{IGM}\rangle$ The fraction of free electrons present in halos should be equal to the fraction of diffuse gas in halos assuming the ionization state of the individual species is only dependent on redshift (and not gas density as well). $$\begin{aligned}\frac{\langle n_{e, halos}\rangle}{\langle n_{e, cosmic}\rangle} & = \frac{\rho_{diffuse,halos}}{\rho_{diffuse,cosmic}}\\& = \frac{\rho_{b, halos}f_{hot}}{\rho_{b, cosmic}f_{diffuse, cosmic}}\\\end{aligned}$$Here $\rho_b$ refers to baryon density. $f_{hot}$ refers to the fraction of baryons in halos that is in the hot phase ($\sim10^7$ K). The remaining baryons are either in the neutral phase or in dense objects like stars. Assuming halos have the same baryon mass fraction as the universal average ($\Omega_b/\Omega_M$)$$\begin{aligned}\frac{\langle n_{e, halos}\rangle}{\langle n_{e, cosmic}\rangle} & = \frac{\rho_{m, halos}f_{hot}}{\rho_{m, cosmic}f_{diffuse, cosmic}}\\& = \frac{f_{halos} f_{hot}}{f_{diffuse, cosmic}}\\\end{aligned}$$$f_{halos}$ can be computed as a function of redshift by integrating the halo mass function (HMF) times mass over some mass range and dividing it by the density of matter in the universe. This allows us to compute a line of sight integral of $\langle n_{e, halos} \rangle$ to get $\langle DM_{halos}\rangle$. $\langle DM_{IGM}\rangle$ is just obtained by subtracting this from $\langle DM_{cosmic}\rangle$.Apart from $f_{hot}$ being an obvious free parameter, we also allow variation in the radial extent of halos. This is encoded in the parameter $r_{max}$ which is the radial extent of halos in units of $r_{200}$. Setting $r_{max}>1$ (for all halos; currently it is mass independent) smoothly extends the NFW profile and the modifid profile of the encased diffuse baryons.
###Code
help(frb_igm.average_DMhalos)
# evaluation
frb_igm.average_DMhalos(0.1)
# get cumulative DM_halos
dm, zvals = frb_igm.average_DMhalos(0.1, cumul = True)
dm
zvals
fhot_array = [0.2, 0.5, 0.75]
rmax_array = [0.5, 1.0 , 2.0]
# <DM_halos> for different f_hot
fig, axs = plt.subplots(2,1, sharex=True, figsize = (8,7))
fig.tight_layout()
ax1 = axs[0]
for f_hot in fhot_array:
DM_halos, zeval = frb_igm.average_DMhalos(3, f_hot = f_hot, cumul=True)
ax1.plot(zeval, DM_halos, label="{:0.1f}".format(f_hot))
ax1.legend(title="f_hot")
ax1.set_ylabel(r'$\langle DM_{halos}\rangle$ $pc~cm^{-3}$')
# <DM_halos> for different rmax
ax2 = axs[1]
for rmax in rmax_array:
DM_halos, zeval = frb_igm.average_DMhalos(3, rmax = rmax, cumul = True)
ax2.plot(zeval, DM_halos, label="{:0.1f}".format(rmax))
ax2.legend(title="rmax")
ax2.set_xlabel('z')
ax2.set_ylabel(r'$\langle DM_{halos}\rangle$ $pc~cm^{-3}$')
plt.show()
# Limits of calculation
frb_igm.average_DMhalos(3.1)
# Failure above redshift 5
frb_igm.average_DMhalos(5.1)
help(frb_igm.average_DMIGM)
# Sanity check. <DM_cosmic> - (<DM_halos> + <DM_IGM) = 0
dm, zvals = frb_igm.average_DM(0.1, cumul= True)
dm_halos, _ = frb_igm.average_DMhalos(0.1, cumul = True)
dm_igm, _ = frb_igm.average_DMIGM(0.1, cumul = True)
plt.plot(zvals, dm - dm_halos - dm_igm)
plt.ylabel(r"DM $pc~cm^{-3}$")
plt.xlabel("z")
plt.show()
###Output
_____no_output_____
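###Markdown
As a final illustration, the cumulative curves can be combined to ask what fraction of $\langle DM_{cosmic}\rangle$ is contributed by halos at a given redshift. The redshift z = 1 below is an arbitrary choice, and the default f_hot and rmax are assumed.
###Code
# fraction of <DM_cosmic> contributed by halos out to z = 1
dm_cosmic_1, _ = frb_igm.average_DM(1.0, cumul=True)
dm_halos_1, _ = frb_igm.average_DMhalos(1.0, cumul=True)
dm_halos_1[-1] / dm_cosmic_1[-1]
###Output
_____no_output_____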
###Markdown
DM_Halos and DM_IGMSplitting $\langle DM_{cosmic}\rangle$ into its constituents.
###Code
# imports
from importlib import reload
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as IUS
from astropy import units as u
from frb.halos.models import ModifiedNFW
from frb.halos import models as frb_halos
from frb.halos import hmf as frb_hmf
from frb.dm import igm as frb_igm
from frb.figures import utils as ff_utils
from matplotlib import pyplot as plt
plt.rcParams['font.size'] = 17
###Output
_____no_output_____
###Markdown
$\langle \rho_{diffuse, cosmic}\rangle$Use `f_diffuse` to calculate the average mass fraction of diffuse gas and diffuse gas density (physical). Math described in [DM_cosmic.ipynb](DM_cosmic.ipynb).
###Code
help(frb_igm.f_diffuse)
# Define redshifts
zvals = np.linspace(0, 8)
# Get <n_e>
f_diffuse, rho_diffuse = frb_igm.f_diffuse(zvals, return_rho = True)
# Plot
fig, axs = plt.subplots(2,1, sharex=True, figsize = (8,7))
fig.tight_layout()
ax1 = axs[0]
ax1.plot(zvals, f_diffuse, lw=2)
ax1.set_ylabel(r'$\langle f_{diffuse, cosmic}\rangle$')
ax2 = axs[1]
ax2.plot(zvals, rho_diffuse.to('Msun*Mpc**-3'), lw=2)
ax2.set_yscale("log")
ax2.set_xlabel('z')
ax2.set_ylabel(r'$\langle \rho_{diffuse, cosmic}\rangle$ $M_\odot~Mpc^{-3}$')
plt.show()
###Output
_____no_output_____
###Markdown
$\langle n_{e,cosmic}\rangle$
###Code
help(frb_igm.ne_cosmic)
# Define redshifts
zvals = np.linspace(0, 8)
# Get <n_e>
avg_ne = frb_igm.ne_cosmic(zvals)
# Visualize
fig = plt.figure(figsize = (10, 6))
plt.plot(zvals, avg_ne, label=r'$\langle n_{e, cosmic}\rangle$', lw=2)
plt.yscale("log")
plt.legend(loc = "upper left")
plt.xlabel('z')
plt.ylabel(r'$\langle n_{e, cosmic}\rangle$ [$cm^{-3}$]')
plt.show()
###Output
_____no_output_____
###Markdown
$\langle DM_{cosmic}\rangle$See [DM_cosmic.ipynb](DM_cosmic.ipynb) for details regarding its computation.
###Code
help(frb_igm.average_DM)
DM_cosmic, zvals = frb_igm.average_DM(8, cumul=True)
# Visualize
fig = plt.figure(figsize = (10, 6))
plt.plot(zvals, DM_cosmic, lw=2)
plt.xlabel('z')
plt.ylabel(r'$\langle DM_{cosmic}\rangle$ $pc~cm^{-3}$')
plt.show()
###Output
_____no_output_____
###Markdown
$\langle DM_{halos}\rangle$ and $\langle DM_{IGM}\rangle$ The fraction of free electrons present in halos should be equal to the fraction of diffuse gas in halos assuming the ionization state of the individual species is only dependent on redshift (and not gas density as well). $$\begin{aligned}\frac{\langle n_{e, halos}\rangle}{\langle n_{e, cosmic}\rangle} & = \frac{\rho_{diffuse,halos}}{\rho_{diffuse,cosmic}}\\& = \frac{\rho_{b, halos}f_{hot}}{\rho_{b, cosmic}f_{diffuse, cosmic}}\\\end{aligned}$$Here $\rho_b$ refers to baryon density. $f_{hot}$ refers to the fraction of baryons in halos that is in the hot phase ($\sim10^7$ K). The remaining baryons are either in the neutral phase or in dense objects like stars. Assuming halos have the same baryon mass fraction as the universal average ($\Omega_b/\Omega_M$)$$\begin{aligned}\frac{\langle n_{e, halos}\rangle}{\langle n_{e, cosmic}\rangle} & = \frac{\rho_{m, halos}f_{hot}}{\rho_{m, cosmic}f_{diffuse, cosmic}}\\& = \frac{f_{halos} f_{hot}}{f_{diffuse, cosmic}}\\\end{aligned}$$$f_{halos}$ can be computed as a function of redshift by integrating the halo mass function (HMF) times mass over some mass range and dividing it by the density of matter in the universe. This allows us to compute a line of sight integral of $\langle n_{e, halos} \rangle$ to get $\langle DM_{halos}\rangle$. $\langle DM_{IGM}\rangle$ is just obtained by subtracting this from $\langle DM_{cosmic}\rangle$.Apart from $f_{hot}$ being an obvious free parameter, we also allow variation in the radial extent of halos. This is encoded in the parameter $r_{max}$ which is the radial extent of halos in units of $r_{200}$. Setting $r_{max}>1$ (for all halos; currently it is mass independent) smoothly extends the NFW profile and the modifid profile of the encased diffuse baryons.
###Code
help(frb_igm.average_DMhalos)
# evaluation
frb_igm.average_DMhalos(0.1)
# get cumulative DM_halos
dm, zvals = frb_igm.average_DMhalos(0.1, cumul = True)
dm
zvals
fhot_array = [0.2, 0.5, 0.75]
rmax_array = [0.5, 1.0 , 2.0]
# <DM_halos> for different f_hot
fig, axs = plt.subplots(2,1, sharex=True, figsize = (8,7))
fig.tight_layout()
ax1 = axs[0]
for f_hot in fhot_array:
DM_halos, zeval = frb_igm.average_DMhalos(3, f_hot = f_hot, cumul=True)
ax1.plot(zeval, DM_halos, label="{:0.1f}".format(f_hot))
ax1.legend(title="f_hot")
ax1.set_ylabel(r'$\langle DM_{halos}\rangle$ $pc~cm^{-3}$')
# <DM_halos> for different rmax
ax2 = axs[1]
for rmax in rmax_array:
DM_halos, zeval = frb_igm.average_DMhalos(3, rmax = rmax, cumul = True)
ax2.plot(zeval, DM_halos, label="{:0.1f}".format(rmax))
ax2.legend(title="rmax")
ax2.set_xlabel('z')
ax2.set_ylabel(r'$\langle DM_{halos}\rangle$ $pc~cm^{-3}$')
plt.show()
# Limits of calculation
frb_igm.average_DMhalos(3.1)
# Failure above redshift 5
frb_igm.average_DMhalos(5.1)
help(frb_igm.average_DMIGM)
# Sanity check. <DM_cosmic> - (<DM_halos> + <DM_IGM) = 0
dm, zvals = frb_igm.average_DM(0.1, cumul= True)
dm_halos, _ = frb_igm.average_DMhalos(0.1, cumul = True)
dm_igm, _ = frb_igm.average_DMIGM(0.1, cumul = True)
plt.plot(zvals, dm - dm_halos - dm_igm)
plt.ylabel(r"DM $pc~cm^{-3}$")
plt.xlabel("z")
plt.show()
###Output
_____no_output_____ |
00_qdrant.ipynb | ###Markdown
production: cd; docker run -p 6333:6333 -v $(pwd)/qdrant_storage:/qdrant/storage --name qdrant_prod qdrant/qdrant; docker update --restart unless-stopped qdrant_prod. dev: cd; docker run -p 6334:6333 -v $(pwd)/qdrant_storage_dev:/qdrant/storage --name qdrant_dev qdrant/qdrant; docker update --restart unless-stopped qdrant_dev
###Code
# !pip install --upgrade qdrant_client
#export
dim = 768 #+ onehot.n_dim()
QdrantClient.get_collections = lambda self: [c['name'] for c in self.http.collections_api.get_collections().dict()['result']['collections']]
QdrantClient.collection_len = lambda self, name: self.http.collections_api.get_collection(name).dict()['result']['vectors_count']
#export
prod_client = QdrantClient(host='localhost', port=6333)
dev_client = QdrantClient(host='localhost', port=6334)
collection_name
dev_client.recreate_collection(collection_name,dim,qdrant_client.http.models.Distance.DOT)
prod_client.get_collections(),dev_client.get_collections()
collection_name
dev_client.collection_len(collection_name)
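# Minimal usage sketch (assumption: real vectors for this collection come from the
# CLIP model elsewhere in this repo; the zero vector below is only a placeholder to
# exercise the same search call used in 01_search).
placeholder_vector = [0.0] * dim
dev_client.search(collection_name=collection_name, query_vector=placeholder_vector, top=3)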
!nbdev_build_lib
###Output
Converted 00_clipmodel.ipynb.
Converted 00_custom_pandas.ipynb.
Converted 00_paths.ipynb.
Converted 00_progress_check.ipynb.
Converted 00_psql.ipynb.
Converted 00_qdrant.ipynb.
Converted 00_tools.ipynb.
Converted 01_multiple_foods.ipynb.
Converted 01_multiple_foods_segmantation.ipynb.
Converted 01_search.ipynb.
Converted 0_template copy 2.ipynb.
Converted 0_template copy.ipynb.
Converted 0_template.ipynb.
Converted OFA.ipynb.
Converted Untitled-1.ipynb.
Converted bot pseudocode.ipynb.
Converted bot_test.ipynb.
Converted classifying_glovo_images.ipynb.
Converted foodd dataset.ipynb.
Converted foods_prompted_tosql.ipynb.
Converted ideas.ipynb.
Converted inference.ipynb.
Converted multiple3105.ipynb.
Converted multiple_3005.ipynb.
No export destination, ignored:
#export
def search_image(url=None,head = 1):
image_clip = requests.post(f'https://guru.skynet.center/image2vector/?url={url}').json()
results = client.search(collection_name=collection_name,query_vector=image_clip,top=head)
image_clip = torch.Tensor(image_clip)
df = foods.loc[[r.id for r in results]].copy()
df['score'] = [r.score for r in results]
df = df.sort_values('score',ascending=False)
return image_clip,df.reset_index()
series2tensor = lambda series:torch.tensor([np.array(c) for c in series.values])
Warning: Exporting to "None.py" but this module is not part of this build
Traceback (most recent call last):
File "/home/dima/anaconda3/envs/food/bin/nbdev_build_lib", line 8, in <module>
sys.exit(nbdev_build_lib())
File "/home/dima/anaconda3/envs/food/lib/python3.9/site-packages/fastcore/script.py", line 112, in _f
tfunc(**merge(args, args_from_prog(func, xtra)))
File "/home/dima/anaconda3/envs/food/lib/python3.9/site-packages/nbdev/export2html.py", line 465, in nbdev_build_lib
notebook2script(fname=fname, bare=bare)
File "/home/dima/anaconda3/envs/food/lib/python3.9/site-packages/nbdev/export.py", line 430, in notebook2script
for f in sorted(files): d = _notebook2script(f, modules, silent=silent, to_dict=d, bare=bare)
File "/home/dima/anaconda3/envs/food/lib/python3.9/site-packages/nbdev/export.py", line 357, in _notebook2script
if to_dict is None: _add2all(fname_out, [f"'{f}'" for f in names if '.' not in f and len(f) > 0] + extra)
File "/home/dima/anaconda3/envs/food/lib/python3.9/site-packages/nbdev/export.py", line 208, in _add2all
with open(fname, 'r', encoding='utf8') as f: text = f.read()
FileNotFoundError: [Errno 2] No such file or directory: '/home/dima/food/food/None.py'
|
NLP_Test.ipynb | ###Markdown
###Code
!pip install --upgrade gensim
!pip install nlpia
## data wrangling
import pandas as pd
import numpy as np
## plotting
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
import altair as alt
## misc
import json
import datetime
import warnings
warnings.filterwarnings("ignore")
import pickle
from collections import Counter
## Deep Learning
import keras
## pre-processing
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
## models
from keras import models
from keras.models import Sequential
## layers
from keras import layers
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
## word embedding
import gensim.downloader as api
## optimizer
from keras.optimizers import Adam
## evaluating model
from keras.callbacks import ReduceLROnPlateau
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import plot_model
###Output
Using TensorFlow backend.
###Markdown
Load Data
###Code
## twitter data
data_url = "https://raw.githubusercontent.com/papagorgio23/GamblingTwitter_Bot/master/Data/RNN_Data1.csv"
data = pd.read_csv(data_url)
data.head()
plt.figure(figsize = (25, 10))
ax = sns.countplot(x="screen_name", data=data)
plt.xticks(
rotation=45,
horizontalalignment='right',
fontweight='light',
fontsize='x-large'
);
# Length of tweets
data['Tweet_length'] = data['Text_Parsed_1'].str.len()
plt.figure(figsize=(12.8,6))
sns.distplot(data['Tweet_length']).set_title('Tweet length distribution');
plt.figure(figsize=(25,6))
sns.boxplot(data=data, x='screen_name', y='Tweet_length', width=.5)
plt.xticks(
rotation=45,
horizontalalignment='right',
fontweight='light',
fontsize='x-large'
);
# @username, links, \r and \n
data['Text_Parsed_1'] = data['text'].str.replace('@[^\s]+', " ")
data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("http\S+", " ")
data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("#", " hashtag ")
data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("%", " percent ")
data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace(" - ", " ")
data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("pts", " points ")
data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("reb", " rebounds ")
data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("w/", " with ")
data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("w/o", " without ")
data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("w/out", " without ")
data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("o/u", " over under ")
data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("-[0-9]", " favored by some points ")
data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("\+[0-9]", " underdog by some points ")
data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace("\n", " ")
data['Text_Parsed_1'] = data['Text_Parsed_1'].str.replace(" ", " ")
data.head()
print("Raw: ", data.loc[1]['text'])
print()
print()
print("Cleaned: ", data.loc[1]['Text_Parsed_1'])
###Output
Raw: looking at l assit 10 years of data. home teams on thursday night football have a 60 percent win percent . home favs win 74 percent and ats home favs are 57.4 percent . on sunday gms the l assit 10 years home teams have an overall win percent of 57 percent . home favs win 68 percent and home favs ats is only 48 percent . so anecdotally home teams have an adv on thursday night football v sun
Cleaned: looking at l assit 10 years of data. home teams on thursday night football have a 60 percent win percent . home favs win 74 percent and ats home favs are 57.4 percent . on sunday gms the l assit 10 years home teams have an overall win percent of 57 percent . home favs win 68 percent and home favs ats is only 48 percent . so anecdotally home teams have an adv on thursday night football v sun
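###Markdown
Before the cleaned tweets can be fed to a network they need to be turned into padded integer sequences. A minimal sketch with the Keras tokenizer; the vocabulary size and sequence length below are arbitrary illustrative choices, not values from the original pipeline:
###Code
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences

tokenizer = Tokenizer(num_words=10000)                   # keep the 10,000 most frequent tokens
tokenizer.fit_on_texts(data['Text_Parsed_1'])
sequences = tokenizer.texts_to_sequences(data['Text_Parsed_1'])
X = pad_sequences(sequences, maxlen=50)                  # pad/truncate each tweet to 50 tokens
X.shape
###Output
_____no_output_____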
###Markdown
Load Pre-Trained Word Embeddings
###Code
info = api.info()
print(json.dumps(info, indent=4))
###Output
{
"corpora": {
"semeval-2016-2017-task3-subtaskBC": {
"num_records": -1,
"record_format": "dict",
"file_size": 6344358,
"reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/semeval-2016-2017-task3-subtaskB-eng/__init__.py",
"license": "All files released for the task are free for general research use",
"fields": {
"2016-train": [
"..."
],
"2016-dev": [
"..."
],
"2017-test": [
"..."
],
"2016-test": [
"..."
]
},
"description": "SemEval 2016 / 2017 Task 3 Subtask B and C datasets contain train+development (317 original questions, 3,169 related questions, and 31,690 comments), and test datasets in English. The description of the tasks and the collected data is given in sections 3 and 4.1 of the task paper http://alt.qcri.org/semeval2016/task3/data/uploads/semeval2016-task3-report.pdf linked in section \u201cPapers\u201d of https://github.com/RaRe-Technologies/gensim-data/issues/18.",
"checksum": "701ea67acd82e75f95e1d8e62fb0ad29",
"file_name": "semeval-2016-2017-task3-subtaskBC.gz",
"read_more": [
"http://alt.qcri.org/semeval2017/task3/",
"http://alt.qcri.org/semeval2017/task3/data/uploads/semeval2017-task3.pdf",
"https://github.com/RaRe-Technologies/gensim-data/issues/18",
"https://github.com/Witiko/semeval-2016_2017-task3-subtaskB-english"
],
"parts": 1
},
"semeval-2016-2017-task3-subtaskA-unannotated": {
"num_records": 189941,
"record_format": "dict",
"file_size": 234373151,
"reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/semeval-2016-2017-task3-subtaskA-unannotated-eng/__init__.py",
"license": "These datasets are free for general research use.",
"fields": {
"THREAD_SEQUENCE": "",
"RelQuestion": {
"RELQ_CATEGORY": "question category, according to the Qatar Living taxonomy",
"RELQ_DATE": "date of posting",
"RELQ_ID": "question indentifier",
"RELQ_USERID": "identifier of the user asking the question",
"RELQ_USERNAME": "name of the user asking the question",
"RelQBody": "body of question",
"RelQSubject": "subject of question"
},
"RelComments": [
{
"RelCText": "text of answer",
"RELC_USERID": "identifier of the user posting the comment",
"RELC_ID": "comment identifier",
"RELC_USERNAME": "name of the user posting the comment",
"RELC_DATE": "date of posting"
}
]
},
"description": "SemEval 2016 / 2017 Task 3 Subtask A unannotated dataset contains 189,941 questions and 1,894,456 comments in English collected from the Community Question Answering (CQA) web forum of Qatar Living. These can be used as a corpus for language modelling.",
"checksum": "2de0e2f2c4f91c66ae4fcf58d50ba816",
"file_name": "semeval-2016-2017-task3-subtaskA-unannotated.gz",
"read_more": [
"http://alt.qcri.org/semeval2016/task3/",
"http://alt.qcri.org/semeval2016/task3/data/uploads/semeval2016-task3-report.pdf",
"https://github.com/RaRe-Technologies/gensim-data/issues/18",
"https://github.com/Witiko/semeval-2016_2017-task3-subtaskA-unannotated-english"
],
"parts": 1
},
"patent-2017": {
"num_records": 353197,
"record_format": "dict",
"file_size": 3087262469,
"reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/patent-2017/__init__.py",
"license": "not found",
"description": "Patent Grant Full Text. Contains the full text including tables, sequence data and 'in-line' mathematical expressions of each patent grant issued in 2017.",
"checksum-0": "818501f0b9af62d3b88294d86d509f8f",
"checksum-1": "66c05635c1d3c7a19b4a335829d09ffa",
"file_name": "patent-2017.gz",
"read_more": [
"http://patents.reedtech.com/pgrbft.php"
],
"parts": 2
},
"quora-duplicate-questions": {
"num_records": 404290,
"record_format": "dict",
"file_size": 21684784,
"reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/quora-duplicate-questions/__init__.py",
"license": "probably https://www.quora.com/about/tos",
"fields": {
"question1": "the full text of each question",
"question2": "the full text of each question",
"qid1": "unique ids of each question",
"qid2": "unique ids of each question",
"id": "the id of a training set question pair",
"is_duplicate": "the target variable, set to 1 if question1 and question2 have essentially the same meaning, and 0 otherwise"
},
"description": "Over 400,000 lines of potential question duplicate pairs. Each line contains IDs for each question in the pair, the full text for each question, and a binary value that indicates whether the line contains a duplicate pair or not.",
"checksum": "d7cfa7fbc6e2ec71ab74c495586c6365",
"file_name": "quora-duplicate-questions.gz",
"read_more": [
"https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs"
],
"parts": 1
},
"wiki-english-20171001": {
"num_records": 4924894,
"record_format": "dict",
"file_size": 6516051717,
"reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/wiki-english-20171001/__init__.py",
"license": "https://dumps.wikimedia.org/legal.html",
"fields": {
"section_texts": "list of body of sections",
"section_titles": "list of titles of sections",
"title": "Title of wiki article"
},
"description": "Extracted Wikipedia dump from October 2017. Produced by `python -m gensim.scripts.segment_wiki -f enwiki-20171001-pages-articles.xml.bz2 -o wiki-en.gz`",
"checksum-0": "a7d7d7fd41ea7e2d7fa32ec1bb640d71",
"checksum-1": "b2683e3356ffbca3b6c2dca6e9801f9f",
"checksum-2": "c5cde2a9ae77b3c4ebce804f6df542c2",
"checksum-3": "00b71144ed5e3aeeb885de84f7452b81",
"file_name": "wiki-english-20171001.gz",
"read_more": [
"https://dumps.wikimedia.org/enwiki/20171001/"
],
"parts": 4
},
"text8": {
"num_records": 1701,
"record_format": "list of str (tokens)",
"file_size": 33182058,
"reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/text8/__init__.py",
"license": "not found",
"description": "First 100,000,000 bytes of plain text from Wikipedia. Used for testing purposes; see wiki-english-* for proper full Wikipedia datasets.",
"checksum": "68799af40b6bda07dfa47a32612e5364",
"file_name": "text8.gz",
"read_more": [
"http://mattmahoney.net/dc/textdata.html"
],
"parts": 1
},
"fake-news": {
"num_records": 12999,
"record_format": "dict",
"file_size": 20102776,
"reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/fake-news/__init__.py",
"license": "https://creativecommons.org/publicdomain/zero/1.0/",
"fields": {
"crawled": "date the story was archived",
"ord_in_thread": "",
"published": "date published",
"participants_count": "number of participants",
"shares": "number of Facebook shares",
"replies_count": "number of replies",
"main_img_url": "image from story",
"spam_score": "data from webhose.io",
"uuid": "unique identifier",
"language": "data from webhose.io",
"title": "title of story",
"country": "data from webhose.io",
"domain_rank": "data from webhose.io",
"author": "author of story",
"comments": "number of Facebook comments",
"site_url": "site URL from BS detector",
"text": "text of story",
"thread_title": "",
"type": "type of website (label from BS detector)",
"likes": "number of Facebook likes"
},
"description": "News dataset, contains text and metadata from 244 websites and represents 12,999 posts in total from a specific window of 30 days. The data was pulled using the webhose.io API, and because it's coming from their crawler, not all websites identified by their BS Detector are present in this dataset. Data sources that were missing a label were simply assigned a label of 'bs'. There are (ostensibly) no genuine, reliable, or trustworthy news sources represented in this dataset (so far), so don't trust anything you read.",
"checksum": "5e64e942df13219465927f92dcefd5fe",
"file_name": "fake-news.gz",
"read_more": [
"https://www.kaggle.com/mrisdal/fake-news"
],
"parts": 1
},
"20-newsgroups": {
"num_records": 18846,
"record_format": "dict",
"file_size": 14483581,
"reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/20-newsgroups/__init__.py",
"license": "not found",
"fields": {
"topic": "name of topic (20 variant of possible values)",
"set": "marker of original split (possible values 'train' and 'test')",
"data": "",
"id": "original id inferred from folder name"
},
"description": "The notorious collection of approximately 20,000 newsgroup posts, partitioned (nearly) evenly across 20 different newsgroups.",
"checksum": "c92fd4f6640a86d5ba89eaad818a9891",
"file_name": "20-newsgroups.gz",
"read_more": [
"http://qwone.com/~jason/20Newsgroups/"
],
"parts": 1
},
"__testing_matrix-synopsis": {
"description": "[THIS IS ONLY FOR TESTING] Synopsis of the movie matrix.",
"checksum": "1767ac93a089b43899d54944b07d9dc5",
"file_name": "__testing_matrix-synopsis.gz",
"read_more": [
"http://www.imdb.com/title/tt0133093/plotsummary?ref_=ttpl_pl_syn#synopsis"
],
"parts": 1
},
"__testing_multipart-matrix-synopsis": {
"description": "[THIS IS ONLY FOR TESTING] Synopsis of the movie matrix.",
"checksum-0": "c8b0c7d8cf562b1b632c262a173ac338",
"checksum-1": "5ff7fc6818e9a5d9bc1cf12c35ed8b96",
"checksum-2": "966db9d274d125beaac7987202076cba",
"file_name": "__testing_multipart-matrix-synopsis.gz",
"read_more": [
"http://www.imdb.com/title/tt0133093/plotsummary?ref_=ttpl_pl_syn#synopsis"
],
"parts": 3
}
},
"models": {
"fasttext-wiki-news-subwords-300": {
"num_records": 999999,
"file_size": 1005007116,
"base_dataset": "Wikipedia 2017, UMBC webbase corpus and statmt.org news dataset (16B tokens)",
"reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/fasttext-wiki-news-subwords-300/__init__.py",
"license": "https://creativecommons.org/licenses/by-sa/3.0/",
"parameters": {
"dimension": 300
},
"description": "1 million word vectors trained on Wikipedia 2017, UMBC webbase corpus and statmt.org news dataset (16B tokens).",
"read_more": [
"https://fasttext.cc/docs/en/english-vectors.html",
"https://arxiv.org/abs/1712.09405",
"https://arxiv.org/abs/1607.01759"
],
"checksum": "de2bb3a20c46ce65c9c131e1ad9a77af",
"file_name": "fasttext-wiki-news-subwords-300.gz",
"parts": 1
},
"conceptnet-numberbatch-17-06-300": {
"num_records": 1917247,
"file_size": 1225497562,
"base_dataset": "ConceptNet, word2vec, GloVe, and OpenSubtitles 2016",
"reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/conceptnet-numberbatch-17-06-300/__init__.py",
"license": "https://github.com/commonsense/conceptnet-numberbatch/blob/master/LICENSE.txt",
"parameters": {
"dimension": 300
},
"description": "ConceptNet Numberbatch consists of state-of-the-art semantic vectors (also known as word embeddings) that can be used directly as a representation of word meanings or as a starting point for further machine learning. ConceptNet Numberbatch is part of the ConceptNet open data project. ConceptNet provides lots of ways to compute with word meanings, one of which is word embeddings. ConceptNet Numberbatch is a snapshot of just the word embeddings. It is built using an ensemble that combines data from ConceptNet, word2vec, GloVe, and OpenSubtitles 2016, using a variation on retrofitting.",
"read_more": [
"http://aaai.org/ocs/index.php/AAAI/AAAI17/paper/view/14972",
"https://github.com/commonsense/conceptnet-numberbatch",
"http://conceptnet.io/"
],
"checksum": "fd642d457adcd0ea94da0cd21b150847",
"file_name": "conceptnet-numberbatch-17-06-300.gz",
"parts": 1
},
"word2vec-ruscorpora-300": {
"num_records": 184973,
"file_size": 208427381,
"base_dataset": "Russian National Corpus (about 250M words)",
"reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/word2vec-ruscorpora-300/__init__.py",
"license": "https://creativecommons.org/licenses/by/4.0/deed.en",
"parameters": {
"dimension": 300,
"window_size": 10
},
"description": "Word2vec Continuous Skipgram vectors trained on full Russian National Corpus (about 250M words). The model contains 185K words.",
"preprocessing": "The corpus was lemmatized and tagged with Universal PoS",
"read_more": [
"https://www.academia.edu/24306935/WebVectors_a_Toolkit_for_Building_Web_Interfaces_for_Vector_Semantic_Models",
"http://rusvectores.org/en/",
"https://github.com/RaRe-Technologies/gensim-data/issues/3"
],
"checksum": "9bdebdc8ae6d17d20839dd9b5af10bc4",
"file_name": "word2vec-ruscorpora-300.gz",
"parts": 1
},
"word2vec-google-news-300": {
"num_records": 3000000,
"file_size": 1743563840,
"base_dataset": "Google News (about 100 billion words)",
"reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/word2vec-google-news-300/__init__.py",
"license": "not found",
"parameters": {
"dimension": 300
},
"description": "Pre-trained vectors trained on a part of the Google News dataset (about 100 billion words). The model contains 300-dimensional vectors for 3 million words and phrases. The phrases were obtained using a simple data-driven approach described in 'Distributed Representations of Words and Phrases and their Compositionality' (https://code.google.com/archive/p/word2vec/).",
"read_more": [
"https://code.google.com/archive/p/word2vec/",
"https://arxiv.org/abs/1301.3781",
"https://arxiv.org/abs/1310.4546",
"https://www.microsoft.com/en-us/research/publication/linguistic-regularities-in-continuous-space-word-representations/?from=http%3A%2F%2Fresearch.microsoft.com%2Fpubs%2F189726%2Frvecs.pdf"
],
"checksum": "a5e5354d40acb95f9ec66d5977d140ef",
"file_name": "word2vec-google-news-300.gz",
"parts": 1
},
"glove-wiki-gigaword-50": {
"num_records": 400000,
"file_size": 69182535,
"base_dataset": "Wikipedia 2014 + Gigaword 5 (6B tokens, uncased)",
"reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/glove-wiki-gigaword-50/__init__.py",
"license": "http://opendatacommons.org/licenses/pddl/",
"parameters": {
"dimension": 50
},
"description": "Pre-trained vectors based on Wikipedia 2014 + Gigaword, 5.6B tokens, 400K vocab, uncased (https://nlp.stanford.edu/projects/glove/).",
"preprocessing": "Converted to w2v format with `python -m gensim.scripts.glove2word2vec -i <fname> -o glove-wiki-gigaword-50.txt`.",
"read_more": [
"https://nlp.stanford.edu/projects/glove/",
"https://nlp.stanford.edu/pubs/glove.pdf"
],
"checksum": "c289bc5d7f2f02c6dc9f2f9b67641813",
"file_name": "glove-wiki-gigaword-50.gz",
"parts": 1
},
"glove-wiki-gigaword-100": {
"num_records": 400000,
"file_size": 134300434,
"base_dataset": "Wikipedia 2014 + Gigaword 5 (6B tokens, uncased)",
"reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/glove-wiki-gigaword-100/__init__.py",
"license": "http://opendatacommons.org/licenses/pddl/",
"parameters": {
"dimension": 100
},
"description": "Pre-trained vectors based on Wikipedia 2014 + Gigaword 5.6B tokens, 400K vocab, uncased (https://nlp.stanford.edu/projects/glove/).",
"preprocessing": "Converted to w2v format with `python -m gensim.scripts.glove2word2vec -i <fname> -o glove-wiki-gigaword-100.txt`.",
"read_more": [
"https://nlp.stanford.edu/projects/glove/",
"https://nlp.stanford.edu/pubs/glove.pdf"
],
"checksum": "40ec481866001177b8cd4cb0df92924f",
"file_name": "glove-wiki-gigaword-100.gz",
"parts": 1
},
"glove-wiki-gigaword-200": {
"num_records": 400000,
"file_size": 264336934,
"base_dataset": "Wikipedia 2014 + Gigaword 5 (6B tokens, uncased)",
"reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/glove-wiki-gigaword-200/__init__.py",
"license": "http://opendatacommons.org/licenses/pddl/",
"parameters": {
"dimension": 200
},
"description": "Pre-trained vectors based on Wikipedia 2014 + Gigaword, 5.6B tokens, 400K vocab, uncased (https://nlp.stanford.edu/projects/glove/).",
"preprocessing": "Converted to w2v format with `python -m gensim.scripts.glove2word2vec -i <fname> -o glove-wiki-gigaword-200.txt`.",
"read_more": [
"https://nlp.stanford.edu/projects/glove/",
"https://nlp.stanford.edu/pubs/glove.pdf"
],
"checksum": "59652db361b7a87ee73834a6c391dfc1",
"file_name": "glove-wiki-gigaword-200.gz",
"parts": 1
},
"glove-wiki-gigaword-300": {
"num_records": 400000,
"file_size": 394362229,
"base_dataset": "Wikipedia 2014 + Gigaword 5 (6B tokens, uncased)",
"reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/glove-wiki-gigaword-300/__init__.py",
"license": "http://opendatacommons.org/licenses/pddl/",
"parameters": {
"dimension": 300
},
"description": "Pre-trained vectors based on Wikipedia 2014 + Gigaword, 5.6B tokens, 400K vocab, uncased (https://nlp.stanford.edu/projects/glove/).",
"preprocessing": "Converted to w2v format with `python -m gensim.scripts.glove2word2vec -i <fname> -o glove-wiki-gigaword-300.txt`.",
"read_more": [
"https://nlp.stanford.edu/projects/glove/",
"https://nlp.stanford.edu/pubs/glove.pdf"
],
"checksum": "29e9329ac2241937d55b852e8284e89b",
"file_name": "glove-wiki-gigaword-300.gz",
"parts": 1
},
"glove-twitter-25": {
"num_records": 1193514,
"file_size": 109885004,
"base_dataset": "Twitter (2B tweets, 27B tokens, 1.2M vocab, uncased)",
"reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/glove-twitter-25/__init__.py",
"license": "http://opendatacommons.org/licenses/pddl/",
"parameters": {
"dimension": 25
},
"description": "Pre-trained vectors based on 2B tweets, 27B tokens, 1.2M vocab, uncased (https://nlp.stanford.edu/projects/glove/).",
"preprocessing": "Converted to w2v format with `python -m gensim.scripts.glove2word2vec -i <fname> -o glove-twitter-25.txt`.",
"read_more": [
"https://nlp.stanford.edu/projects/glove/",
"https://nlp.stanford.edu/pubs/glove.pdf"
],
"checksum": "50db0211d7e7a2dcd362c6b774762793",
"file_name": "glove-twitter-25.gz",
"parts": 1
},
"glove-twitter-50": {
"num_records": 1193514,
"file_size": 209216938,
"base_dataset": "Twitter (2B tweets, 27B tokens, 1.2M vocab, uncased)",
"reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/glove-twitter-50/__init__.py",
"license": "http://opendatacommons.org/licenses/pddl/",
"parameters": {
"dimension": 50
},
"description": "Pre-trained vectors based on 2B tweets, 27B tokens, 1.2M vocab, uncased (https://nlp.stanford.edu/projects/glove/)",
"preprocessing": "Converted to w2v format with `python -m gensim.scripts.glove2word2vec -i <fname> -o glove-twitter-50.txt`.",
"read_more": [
"https://nlp.stanford.edu/projects/glove/",
"https://nlp.stanford.edu/pubs/glove.pdf"
],
"checksum": "c168f18641f8c8a00fe30984c4799b2b",
"file_name": "glove-twitter-50.gz",
"parts": 1
},
"glove-twitter-100": {
"num_records": 1193514,
"file_size": 405932991,
"base_dataset": "Twitter (2B tweets, 27B tokens, 1.2M vocab, uncased)",
"reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/glove-twitter-100/__init__.py",
"license": "http://opendatacommons.org/licenses/pddl/",
"parameters": {
"dimension": 100
},
"description": "Pre-trained vectors based on 2B tweets, 27B tokens, 1.2M vocab, uncased (https://nlp.stanford.edu/projects/glove/)",
"preprocessing": "Converted to w2v format with `python -m gensim.scripts.glove2word2vec -i <fname> -o glove-twitter-100.txt`.",
"read_more": [
"https://nlp.stanford.edu/projects/glove/",
"https://nlp.stanford.edu/pubs/glove.pdf"
],
"checksum": "b04f7bed38756d64cf55b58ce7e97b15",
"file_name": "glove-twitter-100.gz",
"parts": 1
},
"glove-twitter-200": {
"num_records": 1193514,
"file_size": 795373100,
"base_dataset": "Twitter (2B tweets, 27B tokens, 1.2M vocab, uncased)",
"reader_code": "https://github.com/RaRe-Technologies/gensim-data/releases/download/glove-twitter-200/__init__.py",
"license": "http://opendatacommons.org/licenses/pddl/",
"parameters": {
"dimension": 200
},
"description": "Pre-trained vectors based on 2B tweets, 27B tokens, 1.2M vocab, uncased (https://nlp.stanford.edu/projects/glove/).",
"preprocessing": "Converted to w2v format with `python -m gensim.scripts.glove2word2vec -i <fname> -o glove-twitter-200.txt`.",
"read_more": [
"https://nlp.stanford.edu/projects/glove/",
"https://nlp.stanford.edu/pubs/glove.pdf"
],
"checksum": "e52e8392d1860b95d5308a525817d8f9",
"file_name": "glove-twitter-200.gz",
"parts": 1
},
"__testing_word2vec-matrix-synopsis": {
"description": "[THIS IS ONLY FOR TESTING] Word vecrors of the movie matrix.",
"parameters": {
"dimensions": 50
},
"preprocessing": "Converted to w2v using a preprocessed corpus. Converted to w2v format with `python3.5 -m gensim.models.word2vec -train <input_filename> -iter 50 -output <output_filename>`.",
"read_more": [],
"checksum": "534dcb8b56a360977a269b7bfc62d124",
"file_name": "__testing_word2vec-matrix-synopsis.gz",
"parts": 1
}
}
}
###Markdown
List of Models
###Code
for model_name, model_data in sorted(info['models'].items()):
print(
'%s (%d records): %s' % (
model_name,
model_data.get('num_records', -1),
model_data['description'][:40] + '...',
)
)
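# Hedged aside (assumed gensim.downloader behaviour): info() can also be called with a single
# dataset/model name instead of dumping the whole catalogue, e.g.:
# print(json.dumps(api.info('glove-twitter-25'), indent=4))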
###Output
__testing_word2vec-matrix-synopsis (-1 records): [THIS IS ONLY FOR TESTING] Word vecrors ...
conceptnet-numberbatch-17-06-300 (1917247 records): ConceptNet Numberbatch consists of state...
fasttext-wiki-news-subwords-300 (999999 records): 1 million word vectors trained on Wikipe...
glove-twitter-100 (1193514 records): Pre-trained vectors based on 2B tweets,...
glove-twitter-200 (1193514 records): Pre-trained vectors based on 2B tweets, ...
glove-twitter-25 (1193514 records): Pre-trained vectors based on 2B tweets, ...
glove-twitter-50 (1193514 records): Pre-trained vectors based on 2B tweets, ...
glove-wiki-gigaword-100 (400000 records): Pre-trained vectors based on Wikipedia 2...
glove-wiki-gigaword-200 (400000 records): Pre-trained vectors based on Wikipedia 2...
glove-wiki-gigaword-300 (400000 records): Pre-trained vectors based on Wikipedia 2...
glove-wiki-gigaword-50 (400000 records): Pre-trained vectors based on Wikipedia 2...
word2vec-google-news-300 (3000000 records): Pre-trained vectors trained on a part of...
word2vec-ruscorpora-300 (184973 records): Word2vec Continuous Skipgram vectors tra...
###Markdown
Word2Vec Model
###Code
model = api.load("word2vec-google-news-300")
model.most_similar("glass")
model.most_similar("glass")
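# Hedged examples of the same KeyedVectors API (assumed standard gensim calls, kept commented
# so the cell output above is unchanged):
# model.similarity('glass', 'bottle')                                       # cosine similarity of two words
# model.most_similar(positive=['king', 'woman'], negative=['man'], topn=3)  # classic analogy query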
###Output
_____no_output_____
###Markdown
GloVe Model I will be using the GloVe Twitter pretrained model with 200 dimensions, or possibly the 25-dimension model
###Code
model_25 = api.load("glove-twitter-25")
model_200 = api.load("glove-twitter-200")
# NOTE: in this GloVe section these queries were presumably meant for the models loaded above
# (model_200 / model_25); as written they still hit the word2vec-google-news vectors.
model.most_similar("glass")
model.most_similar("imo")
model_25.get_vector("football")
texts = [
["this", "is", "just", "a", "test", "too"],
["maybe", "the", "nfl", "will", "get", "shutdown", "too"],
["I'll", "bet", "on", "the", "49ers", "tonight"]
]
labels = [
["Rufus"],
["Jeff Ma"],
["A.I. Sports"]
]
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np
maxlen = 10 # We will cut tweets after 10 tokens
training_samples = 3 # We will train on the 3 toy samples above
validation_samples = 10000 # We will be validating on 10,000 samples
max_words = 10000 # We will only consider the top 10,000 words in the dataset
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=maxlen)
labels = np.asarray(labels)
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# Split the data into a training set and a validation set.
# (Shuffling/splitting is left commented out below; the original note about negative/positive
#  ordering came from the IMDB example and does not apply to the tweet data.)
#indices = np.arange(data.shape[0])
#np.random.shuffle(indices)
#data = data[indices]
#labels = labels[indices]
#x_train = data[:training_samples]
#y_train = labels[:training_samples]
#x_val = data[training_samples: training_samples + validation_samples]
#y_val = labels[training_samples: training_samples + validation_samples]
embedding_dim = 300
embedding_matrix = np.zeros((max_words, embedding_dim))
data
labels
len(model_25.vocab.keys())
embedding_dim = 25
max_words = 19
embedding_matrix = np.zeros((max_words, embedding_dim))
word_index
nb_words = min(max_words, len(word_index))
# Initialise with small random weights so words missing from the GloVe vocabulary are not all-zero vectors
embedding_matrix = (np.random.rand(nb_words, embedding_dim) - 0.5) / 5.0
len(embedding_matrix)
for word, i in word_index.items():
    if i >= max_words: continue  # max_words (19) bounds the embedding matrix; max_features is only defined later
if word in model_25:
embedding_vector = model_25.get_vector(word)
embedding_matrix[i] = embedding_vector
embedding_matrix
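# Hedged check (a sketch): how many tokenizer words are covered by the GloVe vocabulary;
# anything missing keeps the random initialisation above. Kept commented so outputs are unchanged.
# found = sum(1 for word in word_index if word in model_25)
# print('Words found in glove-twitter-25: %d / %d' % (found, len(word_index)))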
###Output
_____no_output_____
###Markdown
DNN
###Code
from keras.models import Sequential
from keras.layers import Embedding, Flatten, Dense
model_dnn = Sequential(name = "Dense Neural Network")
model_dnn.add(Embedding(max_words, embedding_dim, input_length=maxlen, name = "GloVe_Twitter-25"))
model_dnn.add(Flatten())
model_dnn.add(Dense(32, activation='relu'))
model_dnn.add(Dense(42, activation='softmax'))
## Freeze the embedding weights equal to our pretrained model
model_dnn.layers[0].set_weights([embedding_matrix])
model_dnn.layers[0].trainable = False
model_dnn.summary()
model_dnn.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',  # matches the 42-way softmax output layer above
                  metrics=['acc'])
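# --- Hedged sketch (not from the original notebook): the commented-out split above never defines
# --- x_train/y_train/x_val/y_val, so build minimal arrays from the toy data so the fit() call
# --- below can run. The LabelEncoder/to_categorical handling is an assumption.
from sklearn.preprocessing import LabelEncoder
from keras.utils import to_categorical

label_encoder = LabelEncoder()
y_int = label_encoder.fit_transform(labels.ravel())   # string author names -> integer class ids
y_cat = to_categorical(y_int, num_classes=42)         # 42 matches the softmax layer above

x_train, y_train = data, y_cat
x_val, y_val = data, y_cat                            # placeholder validation data, illustration only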
history = model_dnn.fit(x_train, y_train,
epochs=10,
batch_size=32,
validation_data=(x_val, y_val))
model_dnn.save_weights('pre_trained_glove_model.h5')
###Output
_____no_output_____
###Markdown
1D CNN
###Code
from keras.datasets import imdb
from keras.preprocessing import sequence
max_features = 10000
max_len = 500
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=max_len)
x_test = sequence.pad_sequences(x_test, maxlen=max_len)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model_cnn = Sequential(name="1D_Convolutional_Neural_Network")
model_cnn.add(Embedding(max_words, embedding_dim, input_length=maxlen))
model_cnn.add(layers.Conv1D(32, 7, activation='relu'))
model_cnn.add(layers.MaxPooling1D(2))
model_cnn.add(layers.Conv1D(32, 7, activation='relu'))
model_cnn.add(layers.GlobalMaxPooling1D())
model_cnn.add(layers.Dense(1))
## Freeze the embedding weights equal to our pretrained model
model_cnn.layers[0].set_weights([embedding_matrix])
model_cnn.layers[0].trainable = False
# print model summary
model_cnn.summary()
model_cnn.compile(optimizer=RMSprop(lr=1e-4),
loss='binary_crossentropy',
metrics=['acc'])
# model (the word2vec KeyedVectors) has no fit(); the CNN defined above is model_cnn
history = model_cnn.fit(x_train, y_train,
                        epochs=10,
                        batch_size=128,
                        validation_split=0.2)
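# --- Hedged sketch (an assumption, not the author's code): model_cnn above reuses the 19-word
# --- GloVe embedding with input_length=10, which does not match the IMDB sequences padded to
# --- max_len=500, so the fit above would likely fail on shapes. A self-consistent IMDB variant
# --- with a freshly trained embedding could look like this:
imdb_cnn = Sequential(name="IMDB_1D_CNN")
imdb_cnn.add(Embedding(max_features, 128, input_length=max_len))
imdb_cnn.add(layers.Conv1D(32, 7, activation='relu'))
imdb_cnn.add(layers.MaxPooling1D(5))
imdb_cnn.add(layers.Conv1D(32, 7, activation='relu'))
imdb_cnn.add(layers.GlobalMaxPooling1D())
imdb_cnn.add(layers.Dense(1, activation='sigmoid'))
imdb_cnn.compile(optimizer=RMSprop(lr=1e-4), loss='binary_crossentropy', metrics=['acc'])
# history = imdb_cnn.fit(x_train, y_train, epochs=10, batch_size=128, validation_split=0.2)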
###Output
_____no_output_____
###Markdown
RNN
###Code
# define model
from keras.layers import LSTM  # LSTM was not imported above

model_rnn = Sequential(name="Recurrent_Neural_Network")
model_rnn.add(Embedding(max_words, embedding_dim, input_length=maxlen))
model_rnn.add(LSTM(100, return_sequences=True))
model_rnn.add(LSTM(100))
model_rnn.add(Dense(100, activation='relu'))
model_rnn.add(Dense(42, activation='softmax'))
## Freeze the embedding weights equal to our pretrained model
model_rnn.layers[0].set_weights([embedding_matrix])
model_rnn.layers[0].trainable = False
# print model summary
model_rnn.summary()
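# Hedged sketch (not in the original notebook): compile settings matching the 42-way softmax output.
model_rnn.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['acc'])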
###Output
_____no_output_____ |
Pyramid Scene Parsing Network (PSPNet) Review/code/์ด์์๋ PSPNet_Code Review.ipynb | ###Markdown
The PSPNet implementation below follows https://github.com/hszhao/semseg. Outline 1. Dilated ResNet code 2. Pyramid Pooling Module code 3. Full PSPNet code 1. Dilated Residual Network (Dilated ResNet)
###Code
import torch
import torch.nn as nn
from torchinfo import summary
import torch.nn.functional as F
import resnet as models
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class DilatedResNet(nn.Module):
def __init__(self, layers=50, pretrained=True):
super(DilatedResNet, self).__init__()
# ResNet 50
if layers == 50:
resnet = models.resnet50(pretrained=pretrained)
# ResNet 101
elif layers == 101:
resnet = models.resnet101(pretrained=pretrained)
# ResNet 152
else:
resnet = models.resnet152(pretrained=pretrained)
# ResNet with dilated network
self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
resnet.conv2, resnet.bn2, resnet.relu,
resnet.conv3, resnet.bn3, resnet.relu, resnet.maxpool)
self.layer1 = resnet.layer1
self.layer2 = resnet.layer2
self.layer3 = resnet.layer3
self.layer4 = resnet.layer4
for n, m in self.layer3.named_modules():
if 'conv2' in n:
m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
elif 'downsample.0' in n:
m.stride = (1, 1)
for n, m in self.layer4.named_modules():
if 'conv2' in n:
m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)
elif 'downsample.0' in n:
m.stride = (1, 1)
def forward(self, x, y=None):
x = self.layer0(x)
x = self.layer1(x)
x = self.layer2(x)
x_tmp = self.layer3(x)
x = self.layer4(x_tmp)
return x
inp = torch.rand(4, 3, 200, 200)
layers = 50
resnet = DilatedResNet(layers=layers, pretrained=False)
output = resnet(inp)
print(f"Dilated ResNet {layers}'s output size : {output.size()}")
###Output
Dilated ResNet 50's output size : torch.Size([4, 2048, 25, 25])
###Markdown
2. Pyramid Pooling Module
###Code
class PPM(nn.Module):
def __init__(self, in_dim, reduction_dim, bins):
super(PPM, self).__init__()
self.features = []
# bins = (1, 2, 3, 6) : 1x1, 2x2, 3x3, 6x6
for bin in bins:
self.features.append(nn.Sequential(
                # Create one pooling branch for each pyramid scale
nn.AdaptiveAvgPool2d(bin),
                # Reduce the channel dimension to 1/N (N = len(bins) = 4, the number of pyramid levels)
nn.Conv2d(in_dim, reduction_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(reduction_dim),
nn.ReLU(inplace=True)
))
self.features = nn.ModuleList(self.features)
def forward(self, x):
x_size = x.size()
out = [x]
for f in self.features:
out.append(F.interpolate(f(x), x_size[2:], mode='bilinear', align_corners=True))
# ๊ฐ๊ฐ์ pyramid scale์ ๋ฐ๋ฅธ pooling ๊ฒฐ๊ณผ๋ค์ concatenate
return torch.cat(out, 1)
# input features dim : 2048
in_dim = output.size()[1]
# pyramid pooling levels : 1x1, 2x2, 3x3, 6x6
bins = (1, 2, 3, 6)
# dimension reduction : 1 / N
reduction_dim = int(in_dim / len(bins)) # N = 4
ppm = PPM(in_dim=in_dim, reduction_dim=reduction_dim, bins=bins)
output = ppm(output)
print(f"Pyramid Pooling Module's output size : {output.size()}")
###Output
Pyramid Pooling Module's output size : torch.Size([4, 4096, 25, 25])
###Markdown
AdaptiveAvgPool2d
###Code
inp = torch.tensor([[[[1., 2., 3.], [4., 5., 6.], [7., 8., 9]]]], dtype = torch.float)
print(inp.shape)
print(inp)
out = nn.AdaptiveAvgPool2d(2)(inp)
print(out)
# print(torch.tensor(
# [[[(1. + 2. + 4. + 5.) / 4, (2. + 3. + 5. + 6.) / 4],
# [(4. + 5. + 7. + 8.) / 4, (5. + 6. + 8. + 9.) / 4]]]))
# Global Average Pooling
out = nn.AdaptiveAvgPool2d(1)(inp)
print(out)
###Output
tensor([[[[5.]]]])
###Markdown
3. Full PSPNet Code
###Code
class PSPNet(nn.Module):
def __init__(self, layers=50, bins=(1, 2, 3, 6), dropout=0.1, classes=2, zoom_factor=8, pretrained=True):
super(PSPNet, self).__init__()
        # Factor used to upsample the output back to the original image size
        # The backbone feature map is 1/8 of the original image size
self.zoom_factor = zoom_factor
self.criterion = nn.CrossEntropyLoss()
# ResNet
if layers == 50:
resnet = models.resnet50(pretrained=pretrained)
elif layers == 101:
resnet = models.resnet101(pretrained=pretrained)
else:
resnet = models.resnet152(pretrained=pretrained)
# ResNet with dilated network
self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
resnet.conv2, resnet.bn2, resnet.relu,
resnet.conv3, resnet.bn3, resnet.relu, resnet.maxpool)
self.layer1 = resnet.layer1
self.layer2 = resnet.layer2
self.layer3 = resnet.layer3
self.layer4 = resnet.layer4
for n, m in self.layer3.named_modules():
if 'conv2' in n:
m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
elif 'downsample.0' in n:
m.stride = (1, 1)
for n, m in self.layer4.named_modules():
if 'conv2' in n:
m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)
elif 'downsample.0' in n:
m.stride = (1, 1)
# Dilated ResNet output size : torch.Size([4, 2048, 60, 60])
fea_dim = 2048
self.ppm = PPM(in_dim = fea_dim, reduction_dim = int(fea_dim / len(bins)), bins=bins)
# Pyramid Pooling Module output size : torch.Size([4, 4096, 60, 60])
fea_dim *= 2 # 4096
self.cls = nn.Sequential(
nn.Conv2d(fea_dim, 512, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Dropout2d(p=dropout),
nn.Conv2d(512, classes, kernel_size=1)
)
if self.training:
self.aux = nn.Sequential(
nn.Conv2d(1024, 256, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Dropout2d(p=dropout),
nn.Conv2d(256, classes, kernel_size=1)
)
def forward(self, x, y=None):
x_size = x.size()
# Input image's height, width
h = int((x_size[2] - 1) / 8 * self.zoom_factor + 1)
w = int((x_size[3] - 1) / 8 * self.zoom_factor + 1)
# Resnet with dilated network
x = self.layer0(x)
x = self.layer1(x)
x = self.layer2(x)
x_tmp = self.layer3(x)
x = self.layer4(x_tmp)
# Pyramid Pooling Module
x = self.ppm(x)
# Master branch
x = self.cls(x)
# ์๋ณธ ์ด๋ฏธ์ง ํฌ๊ธฐ๋ก upsampling
if self.zoom_factor != 1:
x = F.interpolate(x, size=(h, w), mode='bilinear', align_corners=True)
if self.training:
            # The auxiliary loss is only used during training
aux = self.aux(x_tmp)
# ์๋ณธ ์ด๋ฏธ์ง ํฌ๊ธฐ๋ก upsampling
if self.zoom_factor != 1:
aux = F.interpolate(aux, size=(h, w), mode='bilinear', align_corners=True)
main_loss = self.criterion(x, y)
aux_loss = self.criterion(aux, y)
return x.max(1)[1], main_loss, aux_loss
else:
return x
inp = torch.rand(4, 3, 473, 473).to(device)
layers = 50
pspnet = PSPNet(layers=layers, bins=(1, 2, 3, 6), dropout=0.1, classes=2, zoom_factor=8, pretrained=False).to(device)
pspnet.eval()
output = pspnet(inp)
print(f"PSPNet with Dilated ResNet {layers}'s output size : {output.size()}")
###Output
PSPNet with Dilated ResNet 50's output size : torch.Size([4, 2, 473, 473])
|
Modulo2/1. Estructuras de Control Iterativas.ipynb | ###Markdown
Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โInformes del Aรฑo aรฑoโ
###Code
# -*- coding: utf-8 -*
anio = 2001
while anio <= 2012:
print("Informes del Aรฑo {}".format(anio))
anio += 1
# anio = anio + 2
# anio += 1
###Output
Informes del Aรฑo 2001
Informes del Aรฑo 2002
Informes del Aรฑo 2003
Informes del Aรฑo 2004
Informes del Aรฑo 2005
Informes del Aรฑo 2006
Informes del Aรฑo 2007
Informes del Aรฑo 2008
Informes del Aรฑo 2009
Informes del Aรฑo 2010
Informes del Aรฑo 2011
Informes del Aรฑo 2012
###Markdown
Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.:
###Code
c = 0
while c <= 5:
c+=1
if c == 4:
print("Rompemos el bucle cuando c vale", c)
break
print("c vale",c)
###Output
c vale 1
c vale 2
c vale 3
Rompemos el bucle cuando c vale 4
###Markdown
- Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle.
###Code
c = 0
while c <= 5:
c+=1
if c==3 or c==4:
print("Continuamos con la siguiente iteraciรณn", c)
continue
print("c vale",c)
###Output
c vale 1
c vale 2
Continuamos con la siguiente iteraciรณn 3
Continuamos con la siguiente iteraciรณn 4
c vale 5
c vale 6
###Markdown
Ejemplo Menรบ Interactivo
###Code
print("Bienvenido al menรบ interactivo")
while True:
print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir""")
opcion = input()
if opcion == '1':
print("Hola, espero que te lo estรฉs pasando bien")
elif opcion == '2':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print("El resultado de la suma es: ",n1+n2)
elif opcion =='3':
print("ยกHasta luego! Ha sido un placer ayudarte")
break
else:
print("Comando desconocido, vuelve a intentarlo")
###Output
Bienvenido al menรบ interactivo
ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir
###Markdown
EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta
###Code
print("Bienvenido")
a = int(input("Ingrese primer nรบmero: "))
b = int(input("Ingrese segundo nรบmero: "))
while True:
print("""Elija un opciรณn
1. Sumar dos numeros
2. Restar dos numeros
3. Multiplicar dos numeros
4. Salir
""")
opc = input()
if opc == '1':
respuesta = a + b
print("La suma es: ",respuesta)
elif opc == '2':
respuesta = a - b
print("La Resta es: ",respuesta)
elif opc == '3':
respuesta = a * b
print("La Multiplicacion es: ",respuesta)
elif opc == '4':
print("Hasta luego")
break
else:
print("Ingrese un numero de la lista")
while True:
a = input("Ingrese primer nรบmero: ")
try:
a = float(a)
break
except:
print('dato ingresado no es un numero')
a
###Output
_____no_output_____
###Markdown
Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla:
###Code
# Iterando sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for elemento in mi_lista:
print(elemento)
# Modificando valores sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for indice,nombre in enumerate(mi_lista):
if nombre=='Juan':
mi_lista[indice]='Maria'
# valor cambiado
mi_lista
# Iterando sobre diccionarios
dicx = {'val1':1,'val2':2,'val3':3}
for key,val in dicx.items():
print(key,val)
cadena = 'hola mundo'
for l in cadena:
print(l)
###Output
h
o
l
a
m
u
n
d
o
###Markdown
Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha:
###Code
# -*- coding: utf-8 -*-
# Generarndo un rango de valores del 2001 al 2012
for anio in range(2001, 2013):
print("Informes del Aรฑo", str(anio))
# por defecto el inicio de la funcion es 0
for i in range(10):
print(i)
###Output
0
1
2
3
4
5
6
7
8
9
###Markdown
EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas
###Code
cantidad = int(input('Ingrese la cantidad de numeros a ingresar: '))
lista_num = []
for i in range(cantidad):
num = float(input(f'Ingrese el numero {i+1}: '))
lista_num.append(num)
#lista_num
len(lista_num) # cantidad de elementos
suma = 0
for e in lista_num:
suma = suma + e
suma
media_aritmetica = suma / cantidad
media_aritmetica
# forma 2
sum(lista_num)/cantidad
# forma 3
suma = 0
for i in range(cantidad):
num = float(input(f'Ingrese el numero {i+1}: '))
suma = suma + num
print('el promedio es: ',suma/cantidad)
# forma 4
i= 1
suma = 0
while i <= cantidad:
num = float(input(f'Ingrese el numero {i}: '))
suma = suma + num
i +=1
print('el promedio es: ',suma/cantidad)
###Output
Ingrese el numero 1: 8
Ingrese el numero 2: 10
Ingrese el numero 3: 4
Ingrese el numero 4: 1
###Markdown
2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4
###Code
'#'*5
###Output
Altura: 4
###Markdown
3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no.
###Code
n = int(input("Introduce un nรบmero entero positivo mayor que 2: "))
i = 2
while n % i != 0:
i += 1
if i == n:
print(str(n) + " es primo")
else:
print(str(n) + " no es primo")
###Output
Introduce un nรบmero entero positivo mayor que 2: 4
###Markdown
4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista:
###Code
lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o']
lista_2 = ["h",'o','l','a',' ', 'l','u','n','a']
lista_3 = []
for letra in lista_1:
if letra in lista_2 and letra not in lista_3:
lista_3.append(letra)
print(lista_3)
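# Alternativa (solo un bosquejo): el mismo resultado usando conjuntos; la intersección conserva
# los elementos presentes en ambas listas, sin repetidos (aunque sin garantizar el orden original).
lista_3_sets = list(set(lista_1) & set(lista_2))
# print(lista_3_sets)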
###Output
['h', 'o', 'l', 'a', ' ', 'u', 'n']
###Markdown
Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โInformes del Aรฑo aรฑoโ
###Code
# -*- coding: utf-8 -*
anio = 2001
while anio <= 2012:
print("Informes del Aรฑo {}".format(anio))
anio += 1
# anio = anio + 2
# anio += 1
###Output
Informes del Aรฑo 2001
Informes del Aรฑo 2002
Informes del Aรฑo 2003
Informes del Aรฑo 2004
Informes del Aรฑo 2005
Informes del Aรฑo 2006
Informes del Aรฑo 2007
Informes del Aรฑo 2008
Informes del Aรฑo 2009
Informes del Aรฑo 2010
Informes del Aรฑo 2011
Informes del Aรฑo 2012
###Markdown
Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.:
###Code
c = 0
while c <= 5:
c+=1
if c == 4:
print("Rompemos el bucle cuando c vale", c)
break
print("c vale",c)
###Output
c vale 1
c vale 2
c vale 3
Rompemos el bucle cuando c vale 4
###Markdown
- Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle.
###Code
c = 0
while c <= 5:
c+=1
if c==3 or c==4:
print("Continuamos con la siguiente iteraciรณn", c)
continue
print("c vale",c)
###Output
c vale 1
c vale 2
Continuamos con la siguiente iteraciรณn 3
Continuamos con la siguiente iteraciรณn 4
c vale 5
c vale 6
###Markdown
Ejemplo Menรบ Interactivo
###Code
print("Bienvenido al menรบ interactivo")
while True:
print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir""")
opcion = input()
if opcion == '1':
print("Hola, espero que te lo estรฉs pasando bien")
elif opcion == '2':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print("El resultado de la suma es: ",n1+n2)
elif opcion =='3':
print("ยกHasta luego! Ha sido un placer ayudarte")
break
else:
print("Comando desconocido, vuelve a intentarlo")
###Output
Bienvenido al menรบ interactivo
ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir
###Markdown
EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta
###Code
print("Bienvenido")
a = int(input("Ingrese primer nรบmero: "))
b = int(input("Ingrese segundo nรบmero: "))
while True:
print("""Elija un opciรณn
1. Sumar dos numeros
2. Restar dos numeros
3. Multiplicar dos numeros
4. Salir
""")
opc = input()
if opc == '1':
respuesta = a + b
print("La suma es: ",respuesta)
elif opc == '2':
respuesta = a - b
print("La Resta es: ",respuesta)
elif opc == '3':
respuesta = a * b
print("La Multiplicacion es: ",respuesta)
elif opc == '4':
print("Hasta luego")
break
else:
print("Ingrese un numero de la lista")
while True:
a = input("Ingrese primer nรบmero: ")
try:
a = float(a)
break
except:
print('dato ingresado no es un numero')
a
###Output
_____no_output_____
###Markdown
Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla:
###Code
# Iterando sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for elemento in mi_lista:
print(elemento)
# Modificando valores sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for indice,nombre in enumerate(mi_lista):
if nombre=='Juan':
mi_lista[indice]='Maria'
# valor cambiado
mi_lista
# Iterando sobre diccionarios
dicx = {'val1':1,'val2':2,'val3':3}
for key,val in dicx.items():
print(key,val)
cadena = 'hola mundo'
for l in cadena:
print(l)
###Output
h
o
l
a
m
u
n
d
o
###Markdown
Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha:
###Code
# -*- coding: utf-8 -*-
# Generarndo un rango de valores del 2001 al 2012
for anio in range(2001, 2013):
print("Informes del Aรฑo", str(anio))
# por defecto el inicio de la funcion es 0
for i in range(10):
print(i)
###Output
0
1
2
3
4
5
6
7
8
9
###Markdown
EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas
###Code
cantidad = int(input('Ingrese la cantidad de numeros a ingresar: '))
lista_num = []
for i in range(cantidad):
num = float(input(f'Ingrese el numero {i+1}: '))
lista_num.append(num)
#lista_num
len(lista_num) # cantidad de elementos
suma = 0
for e in lista_num:
suma = suma + e
suma
media_aritmetica = suma / cantidad
media_aritmetica
# forma 2
sum(lista_num)/cantidad
# forma 3
suma = 0
for i in range(cantidad):
num = float(input(f'Ingrese el numero {i+1}: '))
suma = suma + num
pass # termina bucle
print('el promedio es: ',suma/cantidad)
# forma 4
i= 1
suma = 0
while i <= cantidad:
num = float(input(f'Ingrese el numero {i}: '))
suma = suma + num
i +=1
print('el promedio es: ',suma/cantidad)
###Output
Ingrese el numero 1: 8
Ingrese el numero 2: 10
Ingrese el numero 3: 4
Ingrese el numero 4: 1
###Markdown
2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4
###Code
n=4
'#'*5
#Formando un triรกngulo
n=int(input("Ingrese un numero entero: "))
c=1
while c <= n:
print("#"*c)
c +=1
###Output
Ingrese un numero entero: 7
###Markdown
3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no.
###Code
numero=int(input("Ingreso un numero entero: "))
c=0
for i in range(1,numero+1):
if (numero%i)==0:
c += 1
if c == 2:
print("El numero es PRIMO")
else:
print("EL numero NO ES PRIMO")
###Output
_____no_output_____
###Markdown
4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista:
###Code
lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o']
lista_2 = ["h",'o','l','a',' ', 'l','u','n','a']
con1=set(lista_1)
con2=set(lista_2)
lista_3=list(con1.intersection(con2))  # intersección: elementos que aparecen en ambas listas
lista_3
###Output
_____no_output_____
###Markdown
Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โInformes del Aรฑo aรฑoโ
###Code
# -*- coding: utf-8 -*
# para el aรฑo 2008,2009,2010 no debo entregar informe
# si el aรฑo es 2007 parar el bucle
anio = 2001
while anio <= 2012:
#if anio == 2007:
# print('salida del bucle')
# break # salir del bucle
if anio in [2008,2009,2010]:
print(f'no presentar informe anio {anio}')
anio += 1
continue # continua a la sigueinte iteracion
print("Informes del Aรฑo {}".format(anio))
anio += 1
# anio = anio + 1
# anio += 1
###Output
Informes del Aรฑo 2001
Informes del Aรฑo 2002
Informes del Aรฑo 2003
Informes del Aรฑo 2004
Informes del Aรฑo 2005
Informes del Aรฑo 2006
Informes del Aรฑo 2007
no presentar informe anio 2008
no presentar informe anio 2009
no presentar informe anio 2010
Informes del Aรฑo 2011
Informes del Aรฑo 2012
###Markdown
Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento.
###Code
c = 0
while c <= 5:
print("c vale ",c)
if c == 4:
print("Rompemos el bucle cuando c vale ", c)
break
c+=1
print("bucle finalizado")
###Output
c vale 0
c vale 1
c vale 2
c vale 3
c vale 4
Rompemos el bucle cuando c vale 4
bucle finalizado
###Markdown
- Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle.
###Code
c = 0
while c <= 5:
c+=1
if c==3 or c==4:
print("Continuamos con la siguiente iteraciรณn", c)
continue
print("c vale",c)
###Output
c vale 1
c vale 2
Continuamos con la siguiente iteraciรณn 3
Continuamos con la siguiente iteraciรณn 4
c vale 5
c vale 6
###Markdown
Ejemplo Menรบ Interactivo
###Code
print("Bienvenido al menรบ interactivo")
flag = True
while flag:
print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir""")
opcion = input() # me devuelve un string ''
if opcion == '1':
print("Hola, espero que te lo estรฉs pasando bien")
break
#flag = False
elif opcion == '2':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print(f"El resultado de la suma es: {n1+n2}")
break
#flag = False
elif opcion =='3':
print("ยกHasta luego! Ha sido un placer ayudarte")
#flag = False
break
else:
print("Comando desconocido, vuelve a intentarlo")
print("otra cosa xd ")
pass # end while
print("finalizo el programa")
###Output
Bienvenido al menรบ interactivo
ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir
###Markdown
EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta
###Code
print("Bienvenido al menรบ interactivo")
while True:
print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Suma de los dos numeros
2) Restar el 1er nro menos el 2do nro
3) Multiplicar los dos nros""")
opcion = input("Ingrese la opcion ")
nro1 = int(input("Ingrese el 1er nro "))
nro2 = int(input("Ingrese el 2do nro "))
if opcion == '1':
print(f"L suma de los nros es: {nro1 + nro2}")
break
elif opcion == '2':
print(f"La resta de los nros es: {nro1 - nro2}")
break
elif opcion =='3':
print(f"La multiplicacion de los nros es: {nro1*nro2}")
break
else:
print("Comando desconocido, vuelve a intentarlo")
###Output
Bienvenido al menรบ interactivo
ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Suma de los dos numeros
2) Restar el 1er nro menos el 2do nro
3) Multiplicar los dos nros
###Markdown
Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla:
###Code
# Iterando sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for nombre in mi_lista:
print(nombre)
num1, num2 = [12,15]
print(num1)
print(num2)
for i,nombre in enumerate(mi_lista):
print(i, nombre)
# Modificando valores sobre listas
mi_lista = ['Juan', 'Antonio','Juan', 'Pedro', 'Herminio','Juan']
for indice,nombre in enumerate(mi_lista):
# print(indice, nombre)
if nombre == 'Juan':
mi_lista[indice] = 'Maria'
#mi_lista.remove("Juan")
print(mi_lista)
mi_lista = ['Juan', 'Antonio','Juan', 'Pedro', 'Herminio','Juan']
mi_lista.count("Juan") # cantidad de veces de 'Juan' en la lista
for i in range(mi_lista.count("Juan")):
mi_lista.remove("Juan")
print(mi_lista)
# valor cambiado
mi_lista
# Iterando sobre diccionarios
dicx = {'key1':1,'key2':2,'key3':3}
for key,value in dicx.items():
if key == 'key1':
dicx[key] = 10
dicx
dicx.items()
dicx['key1'] = 10
dicx['nombres'] = ['Maria', 'Antonio', 'Maria', 'Pedro', 'Herminio', 'Maria']
dicx
# Iterando sobre strings
texto = 'Hola Mundo'
for i, letra in enumerate(texto):
print(i, letra)
texto= 'Hola Mundo'
texto
texto[1] = "@"
## reeemplazar 'o' por 'x' en la cadena texto
texto= 'Hola Mundo'
new_texto = ""
for i, l in enumerate(texto):
if l == 'o':
# agrego 'x'
new_texto += 'x'
else:
# agrego la letra
new_texto += l
new_texto
texto_s = ''
for l in texto:
if l == 'o':
texto_s += 'x'
continue
texto_s += l
print(l)
texto_s
texto_s = ''
for l in texto:
texto_s = texto_s + l
print(texto_s)
texto_s
###Output
H
Ho
Hol
Hola
Hola
Hola M
Hola Mu
Hola Mun
Hola Mund
Hola Mundo
###Markdown
Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha:
###Code
# range -> no es un lista
# si quiero convetir el range a una lista debo hacer
[*range(3000,4000,100)]
# -*- coding: utf-8 -*-
# Generarndo un rango de valores del 2001 al 2012
for anio in range(2001, 2013):
print(f"Informes del Aรฑo {anio}")
# por defecto el inicio de la funcion es 0
for i in range(10):
print(f'#{i}')
###Output
#0
#1
#2
#3
#4
#5
#6
#7
#8
#9
###Markdown
EJERCICIOS----------------------------- 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas
###Code
#1.cuantos nรบmeros quiere introducir
cantidad = int(input('Cantidad de numeros a introducir: '))
cantidad
#2. lee todos los nรบmeros
#a = int(input("numero 1"))
#b = int(input("numero 2"))
#c = int(input("numero 3"))
lista_numeros = []
for i in range(cantidad):
num = int(input(f"Ingrese el numero {i +1}: "))
lista_numeros.append(num)
lista_numeros
#3.media aritmรฉtica
#p = (a + b + c) / cantidad #3
promedio = 0
for num in lista_numeros:
promedio += num
promedio = promedio / cantidad
print(f"La media aritmรฉtica de los numeros es {promedio}")
#sum(lista_numeros)/cantidad
#### funcions
def calculo_media(lista_numeros):
promedio = 0
for num in lista_numeros:
promedio += num
return promedio / cantidad
calculo_media([1,2,5])
###Output
_____no_output_____
###Markdown
2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4
###Code
#1. Escribir un programa que pida al usuario un nรบmero entero
h = int(input('Cantidad de numeros a introducir: '))
h
#2.muestre por pantalla un triรกngulo rectรกngulo altura 'h'
for i in range(1, h +1 ):
print('#' *i)
'#'*4
for i in range(1, h + 1):
print(i)
###Output
1
2
3
4
###Markdown
###Code
h = 4
for i in range(1, h+1):
print(' '* (h-i) + '#' * i)
' '*3 + '#' * 1
' ##'
' '*2 + '#' * 2
' ###'
' '*1 + '#' * 3
###Output
_____no_output_____
###Markdown
3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. 4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista:
###Code
lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o']
lista_2 = ["h",'o','l','a',' ', 'l','u','n','a']
###Output
_____no_output_____
###Markdown
Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โInformes del Aรฑo aรฑoโ
###Code
# -*- coding: utf-8 -*
anio = 2001
while anio <= 2012:
print("Informes del Aรฑo {}".format(anio))
anio += 1
# anio = anio + 2
# anio += 1
###Output
Informes del Aรฑo 2001
Informes del Aรฑo 2002
Informes del Aรฑo 2003
Informes del Aรฑo 2004
Informes del Aรฑo 2005
Informes del Aรฑo 2006
Informes del Aรฑo 2007
Informes del Aรฑo 2008
Informes del Aรฑo 2009
Informes del Aรฑo 2010
Informes del Aรฑo 2011
Informes del Aรฑo 2012
###Markdown
Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.:
###Code
c = 0
while c <= 5:
c+=1
if c == 4:
print("Rompemos el bucle cuando c vale", c)
break
print("c vale",c)
#c+=1
d="hola"
print(d,"mundo")
###Output
hola mundo
###Markdown
- Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle.
###Code
c = 0
while c <= 5:
c+=1
if c==3 or c==4:
print("Continuamos con la siguiente iteraciรณn", c)
continue
print("c vale",c)
###Output
c vale 1
c vale 2
Continuamos con la siguiente iteraciรณn 3
Continuamos con la siguiente iteraciรณn 4
c vale 5
c vale 6
###Markdown
Ejemplo Menรบ Interactivo
###Code
print("Bienvenido al menรบ interactivo")
while True:
print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir""")
opcion = input()
if opcion == '1':
print("Hola, espero que te lo estรฉs pasando bien")
elif opcion == '2':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print("El resultado de la suma es: ",n1+n2)
elif opcion =='3':
print("ยกHasta luego! Ha sido un placer ayudarte")
break
else:
print("Comando desconocido, vuelve a intentarlo")
###Output
Bienvenido al menรบ interactivo
ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir
###Markdown
EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta
###Code
print("Bienvenido")
a = int(input("Ingrese primer nรบmero: "))
b = int(input("Ingrese segundo nรบmero: "))
while True:
print("""Elija un opciรณn
1. Sumar dos numeros
2. Restar dos numeros
3. Multiplicar dos numeros
4. Salir
""")
opc = input()
if opc == '1':
respuesta = a + b
print("La suma es: ",respuesta)
elif opc == '2':
respuesta = a - b
print("La Resta es: ",respuesta)
elif opc == '3':
respuesta = a * b
print("La Multiplicacion es: ",respuesta)
elif opc == '4':
print("Hasta luego")
break
else:
print("Ingrese un numero de la lista")
while True:
a = input("Ingrese primer nรบmero: ")
try:
a = float(a)
break
except:
print('dato ingresado no es un numero')
a
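# Sketch (hypothetical helper, not part of the exercise): the validation loop above can be
# wrapped in a function so it can be reused for every number the program needs to read.
def pedir_numero(mensaje):
    while True:
        texto = input(mensaje)
        try:
            return float(texto)
        except ValueError:
            print('dato ingresado no es un numero')

# usage: a = pedir_numero("Ingrese primer numero: ")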
###Output
_____no_output_____
###Markdown
Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla:
###Code
# Iterando sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for elemento in mi_lista:
print(elemento)
# Modificando valores sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Luis']
for indice,nombre in enumerate(mi_lista):
if nombre=='Juan':
mi_lista[indice]='Maria'
# valor cambiado
mi_lista
# Iterando sobre diccionarios
dicx = {'val1':1,'val2':2,'val3':3}
for key,val in dicx.items():
print(key,val)
cadena = 'hola mundo'
for l in cadena:
print(l)
###Output
h
o
l
a
m
u
n
d
o
###Markdown
Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha:
###Code
# -*- coding: utf-8 -*-
# Generarndo un rango de valores del 2001 al 2012
for anio in range(2001, 2013):
print("Informes del Aรฑo", str(anio))
# por defecto el inicio de la funcion es 0
for i in range(10):
print(i)
###Output
0
1
2
3
4
5
6
7
8
9
###Markdown
EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas
###Code
cantidad = int(input('Ingrese la cantidad de numeros a ingresar: '))
lista_num = []
for i in range(cantidad):
num = float(input(f'Ingrese el numero {i+1}: '))
lista_num.append(num)
lista_num
#lista_num
len(lista_num) # cantidad de elementos
suma = 0
for e in lista_num:
suma = suma + e
suma
media_aritmetica = suma / cantidad
media_aritmetica
# forma 2
sum(lista_num)/cantidad
# forma 3
suma = 0
for i in range(cantidad):
num = float(input(f'Ingrese el numero {i+1}: '))
suma = suma + num
    pass  # pass does nothing here; the loop simply ends when the range runs out
print('el promedio es: ',suma/cantidad)
# forma 4
i= 1
suma = 0
while i <= cantidad:
num = float(input(f'Ingrese el numero {i}: '))
suma = suma + num
i +=1
print('el promedio es: ',suma/cantidad)
###Output
Ingrese el numero 1: 8
Ingrese el numero 2: 10
Ingrese el numero 3: 4
Ingrese el numero 4: 1
###Markdown
2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4
###Code
n=4
'#'*5
numero=int(input("Introduce un numero entero:"))
tri=1
while tri<=numero:
print("#"*tri)
tri+=1
###Output
#
##
###
####
#####
######
#######
###Markdown
3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no.
###Code
num=int(input("Escribe un numer entero: "))
lista=[]
div=1
while div<=num:
lisdiv=num/div
lista.append(lisdiv)
div+=1
lista
###Output
_____no_output_____
###Markdown
4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista:
###Code
lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o']
lista_2 = ["h",'o','l','a',' ', 'l','u','n','a']
lista3=[]
for let in lista_1:
if let in lista_2 and let not in lista3:
lista3.append(let)
lista3
###Output
_____no_output_____
###Markdown
Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โInformes del Aรฑo aรฑoโ
###Code
# -*- coding: utf-8 -*
anio = 2001
while anio <= 2012:
print("Informes del Aรฑo {}".format(str(anio)))
anio += 1
# anio = anio + 2
# anio += 1
###Output
Informes del Aรฑo 2001
Informes del Aรฑo 2002
Informes del Aรฑo 2003
Informes del Aรฑo 2004
Informes del Aรฑo 2005
Informes del Aรฑo 2006
Informes del Aรฑo 2007
Informes del Aรฑo 2008
Informes del Aรฑo 2009
Informes del Aรฑo 2010
Informes del Aรฑo 2011
Informes del Aรฑo 2012
###Markdown
Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.:
###Code
c = 0
while c <= 5:
c+=1
if c == 4:
print("Rompemos el bucle cuando c vale", c)
break
print("c vale",c)
###Output
c vale 1
c vale 2
c vale 3
Rompemos el bucle cuando c vale 4
###Markdown
- Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle.
###Code
c = 0
while c <= 5:
c+=1
if c==3 or c==4:
# print("Continuamos con la siguiente iteraciรณn", c)
continue
print("c vale",c)
###Output
c vale 1
c vale 2
c vale 5
c vale 6
###Markdown
Ejemplo Menรบ Interactivo
###Code
print("Bienvenido al menรบ interactivo")
while(True):
print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir""")
opcion = input()
if opcion == '1':
print("Hola, espero que te lo estรฉs pasando bien")
elif opcion == '2':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print("El resultado de la suma es: ",n1+n2)
elif opcion =='3':
print("ยกHasta luego! Ha sido un placer ayudarte")
break
else:
print("Comando desconocido, vuelve a intentarlo")
###Output
Bienvenido al menรบ interactivo
ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir
###Markdown
EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla:
###Code
# Iterando sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for elemento in mi_lista:
print(elemento)
# Modificando valores sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for indice,nombre in enumerate(mi_lista):
if nombre=='Juan':
mi_lista[indice]='Maria'
# valor cambiado
mi_lista
# Iterando sobre diccionarios
dicx = {'val1':1,'val2':2,'val3':3}
for key,val in dicx.items():
print(key,val)
###Output
val1 1
val2 2
val3 3
###Markdown
Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha:
###Code
# -*- coding: utf-8 -*-
# Generarndo un rango de valores del 2001 al 2012
for anio in range(2001, 2013):
print("Informes del Aรฑo", str(anio))
# por defecto el inicio de la funcion es 0
for i in range(10):
print(i)
###Output
0
1
2
3
4
5
6
7
8
9
###Markdown
EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas 2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4 3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. 4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista:
###Code
lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o']
lista_2 = ["h",'o','l','a',' ', 'l','u','n','a']
###Output
_____no_output_____
###Markdown
Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โInformes del Aรฑo aรฑoโ
###Code
# -*- coding: utf-8 -*
anio = 2001
while anio <= 2012:
print("Informes del Aรฑo {}".format(str(anio)))
anio += 1
# anio = anio + 2
# anio += 1
###Output
Informes del Aรฑo 2001
Informes del Aรฑo 2002
Informes del Aรฑo 2003
Informes del Aรฑo 2004
Informes del Aรฑo 2005
Informes del Aรฑo 2006
Informes del Aรฑo 2007
Informes del Aรฑo 2008
Informes del Aรฑo 2009
Informes del Aรฑo 2010
Informes del Aรฑo 2011
Informes del Aรฑo 2012
###Markdown
Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.:
###Code
c = 0
while c <= 5:
c+=1
if c == 4:
print("Rompemos el bucle cuando c vale", c)
break
print("c vale",c)
###Output
c vale 1
c vale 2
c vale 3
Rompemos el bucle cuando c vale 4
###Markdown
- Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle.
###Code
c = 0
while c <= 5:
c+=1
if c==3 or c==4:
# print("Continuamos con la siguiente iteraciรณn", c)
continue
print("c vale",c)
###Output
c vale 1
c vale 2
c vale 5
c vale 6
###Markdown
Ejemplo Menรบ Interactivo
###Code
print("Bienvenido al menรบ interactivo")
while(True):
print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir""")
opcion = input()
if opcion == '1':
print("Hola, espero que te lo estรฉs pasando bien")
elif opcion == '2':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print("El resultado de la suma es: ",n1+n2)
elif opcion =='3':
print("ยกHasta luego! Ha sido un placer ayudarte")
break
else:
print("Comando desconocido, vuelve a intentarlo")
###Output
Bienvenido al menรบ interactivo
ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir
###Markdown
EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla:
###Code
# Iterando sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for elemento in mi_lista:
print(elemento)
# Modificando valores sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for indice,nombre in enumerate(mi_lista):
if nombre=='Juan':
mi_lista[indice]='Maria'
# valor cambiado
mi_lista
# Iterando sobre diccionarios
dicx = {'val1':1,'val2':2,'val3':3}
for key,val in dicx.items():
print(key,val)
###Output
val1 1
val2 2
val3 3
###Markdown
Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha:
###Code
# -*- coding: utf-8 -*-
# Generarndo un rango de valores del 2001 al 2012
for anio in range(2001, 2013):
print("Informes del Aรฑo", str(anio))
# por defecto el inicio de la funcion es 0
for i in range(10):
print(i)
###Output
0
1
2
3
4
5
6
7
8
9
###Markdown
EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas
###Code
cantidad=int(input("Ingrese la cantidad de numeros a ingresar:"))
lista_num=[]
for i in range(cantidad):
num=float(input(f"Ingrese el numero n{i+1}:"))
lista_num.append(num)
lista_num
suma_ = 0
for e in lista_num:
suma_=suma_+e
media=suma_/cantidad
print("El promedio de la suma es :",media)
# FOrma 4
i=1
suma=0
while i<= cantidad:
num = float(input((f"Ingrese el numero n{i}:")))
suma=suma+ num
i+=1
print("El promedio es:", suma/cantidad)
###Output
Ingrese el numero n1: 8
Ingrese el numero n2: 10
Ingrese el numero n3: 4
Ingrese el numero n4: 1
###Markdown
2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4
###Code
n = int(input("Introduce la altura del triรกngulo (entero positivo): "))
for i in range(n):
print("#"*(i+1))
###Output
Introduce la altura del triรกngulo (entero positivo): 6
###Markdown
3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no.
###Code
n=int(input("Ingrese un nรบmero entero:"))
i=2
while n%i!=0:
i+=1
if i==n:
print(str(n),"es primo")
else:
print(str(n)," no es primo")
###Output
Ingrese un nรบmero entero: 47
###Markdown
4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista:
###Code
lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o']
lista_2 = ["h",'o','l','a',' ', 'l','u','n','a']
lista_3=[]
for letra in lista_1:
if letra in lista_2 and letra not in lista_3:
lista_3.append(letra)
print(lista_3)
###Output
['h', 'o', 'l', 'a', ' ', 'u', 'n']
###Markdown
Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โInformes del Aรฑo aรฑoโ
###Code
# -*- coding: utf-8 -*
anio = 2001
while anio <= 2012:
print("Informes del Aรฑo {}".format(anio))
anio += 1
# anio = anio + 2
# anio += 1
###Output
Informes del Aรฑo 2001
Informes del Aรฑo 2002
Informes del Aรฑo 2003
Informes del Aรฑo 2004
Informes del Aรฑo 2005
Informes del Aรฑo 2006
Informes del Aรฑo 2007
Informes del Aรฑo 2008
Informes del Aรฑo 2009
Informes del Aรฑo 2010
Informes del Aรฑo 2011
Informes del Aรฑo 2012
###Markdown
Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.:
###Code
c = 0
while c <= 5:
c+=1
if c == 4:
print("Rompemos el bucle cuando c vale", c)
break
print("c vale",c)
###Output
c vale 1
c vale 2
c vale 3
Rompemos el bucle cuando c vale 4
###Markdown
- Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle.
###Code
c = 0
while c <= 5:
c+=1
if c==3 or c==4:
print("Continuamos con la siguiente iteraciรณn", c)
continue
print("c vale",c)
###Output
c vale 1
c vale 2
Continuamos con la siguiente iteraciรณn 3
Continuamos con la siguiente iteraciรณn 4
c vale 5
c vale 6
###Markdown
Ejemplo Menรบ Interactivo
###Code
print("Bienvenido al menรบ interactivo")
while True:
print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir""")
opcion = input()
if opcion == '1':
print("Hola, espero que te lo estรฉs pasando bien")
elif opcion == '2':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print("El resultado de la suma es: ",n1+n2)
elif opcion =='3':
print("ยกHasta luego! Ha sido un placer ayudarte")
break
else:
print("Comando desconocido, vuelve a intentarlo")
###Output
Bienvenido al menรบ interactivo
ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir
###Markdown
EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta
###Code
print("Bienvenido")
a = int(input("Ingrese primer nรบmero: "))
b = int(input("Ingrese segundo nรบmero: "))
while True:
print("""Elija un opciรณn
1. Sumar dos numeros
2. Restar dos numeros
3. Multiplicar dos numeros
4. Salir
""")
opc = input()
if opc == '1':
respuesta = a + b
print("La suma es: ",respuesta)
elif opc == '2':
respuesta = a - b
print("La Resta es: ",respuesta)
elif opc == '3':
respuesta = a * b
print("La Multiplicacion es: ",respuesta)
elif opc == '4':
print("Hasta luego")
break
else:
print("Ingrese un numero de la lista")
while True:
a = input("Ingrese primer nรบmero: ")
try:
a = float(a)
break
except:
print('dato ingresado no es un numero')
a
###Output
_____no_output_____
###Markdown
Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla:
###Code
# Iterando sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for elemento in mi_lista:
print(elemento)
# Modificando valores sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for indice,nombre in enumerate(mi_lista):
if nombre=='Juan':
mi_lista[indice]='Maria'
# valor cambiado
mi_lista
# Iterando sobre diccionarios
dicx = {'val1':1,'val2':2,'val3':3}
for key,val in dicx.items():
print(key,val)
cadena = 'hola mundo'
for l in cadena:
print(l)
###Output
h
o
l
a
m
u
n
d
o
###Markdown
Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha:
###Code
# -*- coding: utf-8 -*-
# Generarndo un rango de valores del 2001 al 2012
for anio in range(2001, 2013):
print("Informes del Aรฑo", str(anio))
# por defecto el inicio de la funcion es 0
for i in range(10):
print(i)
###Output
0
1
2
3
4
5
6
7
8
9
###Markdown
EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas
###Code
cantidad = int(input('Ingrese la cantidad de numeros a ingresar: '))
lista_num = []
for i in range(cantidad):
num = float(input(f'Ingrese el numero {i+1}: '))
lista_num.append(num)
#lista_num
len(lista_num) # cantidad de elementos
suma = 0
for e in lista_num:
suma = suma + e
suma
media_aritmetica = suma / cantidad
media_aritmetica
# forma 2
sum(lista_num)/cantidad
# forma 3
suma = 0
for i in range(cantidad):
num = float(input(f'Ingrese el numero {i+1}: '))
suma = suma + num
    pass  # pass does nothing here; the loop simply ends when the range runs out
print('el promedio es: ',suma/cantidad)
# forma 4
i= 1
suma = 0
while i <= cantidad:
num = float(input(f'Ingrese el numero {i}: '))
suma = suma + num
i +=1
print('el promedio es: ',suma/cantidad)
###Output
Ingrese el numero 1: 8
Ingrese el numero 2: 10
Ingrese el numero 3: 4
Ingrese el numero 4: 1
###Markdown
2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido.
###Code
n = int(input("Altura: "))
for i in range(n):
print("*"*(i+1))
###Output
Altura: 3
###Markdown
3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no.
###Code
n = int(input("Introduce un nรบmero entero positivo mayor que 2: "))
i = 2
while n % i != 0:
i += 1
if i == n:
print(str(n) + " es primo")
else:
print(str(n) + " no es primo")
###Output
_____no_output_____
###Markdown
4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista:
###Code
lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o']
lista_2 = ["h",'o','l','a',' ', 'l','u','n','a']
lista_3= []
for letra in lista_1:
if letra in lista_2 and letra not in lista_3:
lista_3.append(letra)
print(lista_3)
###Output
['h', 'o', 'l', 'a', ' ', 'u', 'n']
###Markdown
Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โInformes del Aรฑo aรฑoโ
###Code
# -*- coding: utf-8 -*
anio = 2001
while anio <= 2012:
print("Informes del Aรฑo {}".format(anio))
anio += 1
# anio = anio + 1
# anio += 1
###Output
_____no_output_____
###Markdown
Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.:
###Code
c = 0
while c <= 5:
c+=1
if c == 4:
print("Rompemos el bucle cuando c vale", c)
break
print("c vale",c)
###Output
c vale 1
c vale 2
c vale 3
Rompemos el bucle cuando c vale 4
###Markdown
- Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle.
###Code
c = 0
while c <= 5:
c+=1
if c==3 or c==4:
print("Continuamos con la siguiente iteraciรณn", c)
continue
print("c vale",c)
###Output
c vale 1
c vale 2
Continuamos con la siguiente iteraciรณn 3
Continuamos con la siguiente iteraciรณn 4
c vale 5
c vale 6
###Markdown
Ejemplo Menรบ Interactivo
###Code
print("Bienvenido al menรบ interactivo")
bandera = True
while True:
print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir""")
opcion = input()
if opcion == '1':
print("Hola, espero que te lo estรฉs pasando bien")3
elif opcion == '2':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print("El resultado de la suma es: ",n1+n2)
elif opcion =='3':
print("ยกHasta luego! Ha sido un placer ayudarte")
#bandera = False
break
else:
print("Comando desconocido, vuelve a intentarlo")
###Output
Bienvenido al menรบ interactivo
ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir
###Markdown
EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta
###Code
n1 = float(input("Digite primer nรบmero: "))
n2 = float(input("Digite segundo nรบmero: "))
while(True):
print("****MENU****")
print("1.- Sumar dos nรบmeros")
print("2.- Restar dos nรบmeros")
print("3.- Multiplicar dos nรบmeros")
print("4.- Salir del programa")
opcion = input("Opciรณn: ")
if opcion == '1':
resultado = n1 + n2
elif opcion == '2':
resultado = n1-n2
elif opcion == '3':
resultado =n1*n2
elif opcion =='4':
break
else:
print("Digite una opciรณn vรกlida")
continue
print("El resultado es:",resultado)
###Output
Digite primer nรบmero: 15
Digite segundo nรบmero: 12
###Markdown
Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla:
###Code
# Iterando sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for elemento in mi_lista:
print(elemento)
# Modificando valores sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for indice,nombre in enumerate(mi_lista):
# print(indice, nombre)
if nombre == 'Juan':
mi_lista[indice] = 'Maria'
mi_lista
# valor cambiado
mi_lista
# Iterando sobre diccionarios
dicx = {'val1':1,'val2':2,'val3':3}
for key,val in dicx.items():
print(key,val)
dicx.items()
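# Also possible with the standard dict API: iterate only the keys or only the values.
for key in dicx.keys():
    print(key)
for val in dicx.values():
    print(val)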
# Iterando sobre strings
texto = 'Hola Mundo'
for i, letra in enumerate(texto):
print(i, letra)
texto_s = ''
for l in texto:
if l == 'o':
texto_s += 'x'
continue
texto_s += l
texto_s
###Output
_____no_output_____
###Markdown
Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha:
###Code
[*range(1,8,2)]
# -*- coding: utf-8 -*-
# Generarndo un rango de valores del 2001 al 2012
for anio in range(2001, 2013):
print("Informes del Aรฑo", str(anio))
# por defecto el inicio de la funcion es 0
for i in range(10):
print(i)
text = 'hola que tal ?'
for p in text:
print(p)
###Output
h
o
l
a
q
u
e
t
a
l
?
###Markdown
EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas
###Code
# 1. Leer la cantidad de nรบmeros a ingresar
n = int(input("Cuantos nรบmeros desea introducir? "))
n
lista_numeros = []
for num in range(n):
num_input = float(input("introdusca el {} nรบmero: ".format(num+1)))
lista_numeros.append(num_input)
lista_numeros
# MEDIA ARITMETICA = suma todos numeros / cantidad de numeros
media = 0 # valor inicial
for numero in lista_numeros:
media = media + numero
media = media / n
media
# soluciรณn 2
sum(lista_numeros) / n
###Output
_____no_output_____
###Markdown
2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4
###Code
n = int(input("Cuantos nรบmeros desea introducir? "))
n
# en python es posible multiplicar las cadenas de texto
'#'*2
for i in range(1,n+1):
print('#'*i)
# Right-aligned triangle (padded with spaces on the left)
" "*3 + "#" * 1
for i in range(1,n+1):
print(" " * (n-i) + "#" * i)
###Output
#
##
###
####
###Markdown
3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. 4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista:
###Code
lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o']
lista_2 = ["h",'o','l','a',' ', 'l','u','n','a']
###Output
_____no_output_____
###Markdown
Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โInformes del Aรฑo aรฑoโ
###Code
# -*- coding: utf-8 -*
anio = 2001
while anio <= 2012:
print("Informes del Aรฑo {}".format(anio))
anio += 1
# anio = anio + 2
# anio += 1
###Output
Informes del Aรฑo 2001
Informes del Aรฑo 2002
Informes del Aรฑo 2003
Informes del Aรฑo 2004
Informes del Aรฑo 2005
Informes del Aรฑo 2006
Informes del Aรฑo 2007
Informes del Aรฑo 2008
Informes del Aรฑo 2009
Informes del Aรฑo 2010
Informes del Aรฑo 2011
Informes del Aรฑo 2012
###Markdown
Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.:
###Code
c = 0
while c <= 5:
c+=1
if c == 4:
print("Rompemos el bucle cuando c vale", c)
break
print("c vale",c)
###Output
c vale 1
c vale 2
c vale 3
Rompemos el bucle cuando c vale 4
###Markdown
- Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle.
###Code
c = 0
while c <= 5:
c+=1
if c==3 or c==4:
print("Continuamos con la siguiente iteraciรณn", c)
continue
print("c vale",c)
###Output
c vale 1
c vale 2
Continuamos con la siguiente iteraciรณn 3
Continuamos con la siguiente iteraciรณn 4
c vale 5
c vale 6
###Markdown
Ejemplo Menรบ Interactivo
###Code
print("Bienvenido al menรบ interactivo")
while True:
print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir""")
opcion = input()
if opcion == '1':
print("Hola, espero que te lo estรฉs pasando bien")
elif opcion == '2':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print("El resultado de la suma es: ",n1+n2)
elif opcion =='3':
print("ยกHasta luego! Ha sido un placer ayudarte")
break
else:
print("Comando desconocido, vuelve a intentarlo")
###Output
Bienvenido al menรบ interactivo
ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir
###Markdown
EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta
###Code
print("Bienvenido")
a = int(input("Ingrese primer nรบmero: "))
b = int(input("Ingrese segundo nรบmero: "))
while True:
print("""Elija un opciรณn
1. Sumar dos numeros
2. Restar dos numeros
3. Multiplicar dos numeros
4. Salir
""")
opc = input()
if opc == '1':
respuesta = a + b
print("La suma es: ",respuesta)
elif opc == '2':
respuesta = a - b
print("La Resta es: ",respuesta)
elif opc == '3':
respuesta = a * b
print("La Multiplicacion es: ",respuesta)
elif opc == '4':
print("Hasta luego")
break
else:
print("Ingrese un numero de la lista")
while True:
a = input("Ingrese primer nรบmero: ")
try:
a = float(a)
break
except:
print('dato ingresado no es un numero')
a
###Output
_____no_output_____
###Markdown
Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla:
###Code
# Iterando sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for elemento in mi_lista:
print(elemento)
# Modificando valores sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for indice,nombre in enumerate(mi_lista):
if nombre=='Juan':
mi_lista[indice]='Maria'
# valor cambiado
mi_lista
# Iterando sobre diccionarios
dicx = {'val1':1,'val2':2,'val3':3}
for key,val in dicx.items():
print(key,val)
cadena = 'hola mundo'
for l in cadena:
print(l)
###Output
h
o
l
a
m
u
n
d
o
###Markdown
Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha:
###Code
# -*- coding: utf-8 -*-
# Generarndo un rango de valores del 2001 al 2012
for anio in range(2001, 2013):
print("Informes del Aรฑo", str(anio))
# por defecto el inicio de la funcion es 0
for i in range(10):
print(i)
###Output
0
1
2
3
4
5
6
7
8
9
###Markdown
EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas
###Code
cantidad = int(input('Ingrese la cantidad de numeros a ingresar: '))
lista_num = []
for i in range(cantidad):
num = float(input(f'Ingrese el numero {i+1}: '))
lista_num.append(num)
#lista_num
len(lista_num) # cantidad de elementos
suma = 0
for e in lista_num:
suma = suma + e
suma
media_aritmetica = suma / cantidad
media_aritmetica
# forma 2
sum(lista_num)/cantidad
# forma 3
suma = 0
for i in range(cantidad):
num = float(input(f'Ingrese el numero {i+1}: '))
suma = suma + num
    pass  # pass does nothing here; the loop simply ends when the range runs out
print('el promedio es: ',suma/cantidad)
# forma 4
i= 1
suma = 0
while i <= cantidad:
num = float(input(f'Ingrese el numero {i}: '))
suma = suma + num
i +=1
print('el promedio es: ',suma/cantidad)
###Output
Ingrese el numero 1: 8
Ingrese el numero 2: 10
Ingrese el numero 3: 4
Ingrese el numero 4: 1
###Markdown
2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4
###Code
n = int(input("Altura: "))
for i in range(n):
print("*"*(i+1))
###Output
Altura: 8
###Markdown
3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no.
###Code
n = int(input("Introduce un nรบmero entero positivo mayor que 2: "))
i = 2
while n % i != 0:
i += 1
if i == n:
print(str(n) + " es primo")
else:
print(str(n) + " no es primo")
###Output
_____no_output_____
###Markdown
4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista:
###Code
lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o']
lista_2 = ["h",'o','l','a',' ', 'l','u','n','a']
lista_3 = []
for letra in lista_1:
if letra in lista_2 and letra not in lista_3:
lista_3.append(letra)
print(lista_3)
###Output
['h', 'o', 'l', 'a', ' ', 'u', 'n']
###Markdown
Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โInformes del Aรฑo aรฑoโ
###Code
# -*- coding: utf-8 -*
anio = 2001
while anio <= 2012:
print("Informes del Aรฑo {}".format(anio))
anio += 1
# anio = anio + 2
# anio += 1
###Output
Informes del Aรฑo 2001
Informes del Aรฑo 2002
Informes del Aรฑo 2003
Informes del Aรฑo 2004
Informes del Aรฑo 2005
Informes del Aรฑo 2006
Informes del Aรฑo 2007
Informes del Aรฑo 2008
Informes del Aรฑo 2009
Informes del Aรฑo 2010
Informes del Aรฑo 2011
Informes del Aรฑo 2012
###Markdown
Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.:
###Code
c = 0
while c <= 5:
c+=1
if c == 4:
print("Rompemos el bucle cuando c vale", c)
break
print("c vale",c)
###Output
c vale 1
c vale 2
c vale 3
Rompemos el bucle cuando c vale 4
###Markdown
- Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle.
###Code
c = 0
while c <= 5:
c+=1
if c==3 or c==4:
print("Continuamos con la siguiente iteraciรณn", c)
continue
print("c vale",c)
###Output
c vale 1
c vale 2
Continuamos con la siguiente iteraciรณn 3
Continuamos con la siguiente iteraciรณn 4
c vale 5
c vale 6
###Markdown
Ejemplo Menรบ Interactivo
###Code
print("Bienvenido al menรบ interactivo")
while True:
print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir""")
opcion = input()
if opcion == '1':
print("Hola, espero que te lo estรฉs pasando bien")
elif opcion == '2':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print("El resultado de la suma es: ",n1+n2)
elif opcion =='3':
print("ยกHasta luego! Ha sido un placer ayudarte")
break
else:
print("Comando desconocido, vuelve a intentarlo")
###Output
Bienvenido al menรบ interactivo
ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir
###Markdown
EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta
###Code
print("Bienvenido")
a = int(input("Ingrese primer nรบmero: "))
b = int(input("Ingrese segundo nรบmero: "))
while True:
print("""Elija un opciรณn
1. Sumar dos numeros
2. Restar dos numeros
3. Multiplicar dos numeros
4. Salir
""")
opc = input()
if opc == '1':
respuesta = a + b
print("La suma es: ",respuesta)
elif opc == '2':
respuesta = a - b
print("La Resta es: ",respuesta)
elif opc == '3':
respuesta = a * b
print("La Multiplicacion es: ",respuesta)
elif opc == '4':
print("Hasta luego")
break
else:
print("Ingrese un numero de la lista")
while True:
a = input("Ingrese primer nรบmero: ")
try:
a = float(a)
break
except:
print('dato ingresado no es un numero')
a
###Output
_____no_output_____
###Markdown
Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla:
###Code
# Iterando sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for elemento in mi_lista:
print(elemento)
# Modificando valores sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for indice,nombre in enumerate(mi_lista):
if nombre=='Juan':
mi_lista[indice]='Maria'
# valor cambiado
mi_lista
# Iterando sobre diccionarios
dicx = {'val1':1,'val2':2,'val3':3}
for key,val in dicx.items():
print(key,val)
cadena = 'hola mundo'
for l in cadena:
print(l)
###Output
h
o
l
a
m
u
n
d
o
###Markdown
Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha:
###Code
# -*- coding: utf-8 -*-
# Generarndo un rango de valores del 2001 al 2012
for anio in range(2001, 2013):
print("Informes del Aรฑo", str(anio))
# por defecto el inicio de la funcion es 0
for i in range(10):
print(i)
###Output
0
1
2
3
4
5
6
7
8
9
###Markdown
EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas
###Code
cantidad = int(input('Ingrese la cantidad de numeros a ingresar: '))
lista_num = []
for i in range(cantidad):
num = float(input(f'Ingrese el numero {i+1}: '))
lista_num.append(num)
#lista_num
len(lista_num) # cantidad de elementos
suma = 0
for e in lista_num:
suma = suma + e
suma
media_aritmetica = suma / cantidad
media_aritmetica
# forma 2
sum(lista_num)/cantidad
# forma 3
suma = 0
for i in range(cantidad):
num = float(input(f'Ingrese el numero {i+1}: '))
suma = suma + num
    pass  # pass does nothing here; the loop simply ends when the range runs out
print('el promedio es: ',suma/cantidad)
# forma 4
i= 1
suma = 0
while i <= cantidad:
num = float(input(f'Ingrese el numero {i}: '))
suma = suma + num
i +=1
print('el promedio es: ',suma/cantidad)
###Output
Ingrese el numero 1: 8
Ingrese el numero 2: 10
Ingrese el numero 3: 4
Ingrese el numero 4: 1
###Markdown
2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4
###Code
n=4
'#'*5
###Output
_____no_output_____
###Markdown
3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. 4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista:
###Code
lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o']
lista_2 = ["h",'o','l','a',' ', 'l','u','n','a']
###Output
_____no_output_____
###Markdown
Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โInformes del Aรฑo aรฑoโ
###Code
# -*- coding: utf-8 -*
# para el aรฑo 2008,2009,2010 no debo entregar informe
anio = 2001
while anio <= 2012:
if anio in [2008,2009 ,2010]:
anio += 1
continue
print("Informes del Aรฑo {}".format(anio))
anio += 1
# anio = anio + 1
# anio += 1
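# Equivalent sketch with a for loop over range: continue skips those years the same way.
for anio in range(2001, 2013):
    if anio in [2008, 2009, 2010]:
        continue
    print("Informes del Año {}".format(anio))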
###Output
Informes del Aรฑo 2001
Informes del Aรฑo 2002
Informes del Aรฑo 2003
Informes del Aรฑo 2004
Informes del Aรฑo 2005
Informes del Aรฑo 2006
Informes del Aรฑo 2007
Informes del Aรฑo 2011
Informes del Aรฑo 2012
###Markdown
Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento.
###Code
c = 0
while c <= 5:
print("c vale ",c)
if c == 4:
print("Rompemos el bucle cuando c vale ", c)
break
c+=1
###Output
c vale 0
c vale 1
c vale 2
c vale 3
c vale 4
Rompemos el bucle cuando c vale 4
###Markdown
- Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle.
###Code
c = 0
while c <= 5:
c+=1
if c==3 or c==4:
print("Continuamos con la siguiente iteraciรณn", c)
continue
print("c vale",c)
###Output
_____no_output_____
###Markdown
Ejemplo Menรบ Interactivo
###Code
print("Bienvenido al menรบ interactivo")
bandera = True
while True:
print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir""")
opcion = input()
if opcion == '1':
print("Hola, espero que te lo estรฉs pasando bien")2
elif opcion == '2':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print(f"El resultado de la suma es: {n1+n2}")
elif opcion =='3':
print("ยกHasta luego! Ha sido un placer ayudarte")
#bandera = False
break
else:
print("Comando desconocido, vuelve a intentarlo")
###Output
Bienvenido al menรบ interactivo
ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir
###Markdown
EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta
###Code
print("Bienvenido al menรบ interactivo")
while True:
print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Sumar dos nรบmeros
2) Restar dos nรบmeros
3) Multiplicar dos nรบmeros
4) Salir""")
opcion = input()
if opcion == '1':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print("El resultado de la suma es: ",n1+n2)
elif opcion == '2':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print("El resultado de la resta es: ",n1 - n2)
elif opcion =='3':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print("El resultado de la multiplicaciรณn es: ",n1 * n2)
elif opcion =='4':
print("ยกHasta luego! Ha sido un placer ayudarte")
break
else:
print("Comando desconocido, vuelve a intentarlo")
###Output
Bienvenido al menรบ interactivo
ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Sumar dos nรบmeros
2) Restar dos nรบmeros
3) Multiplicar dos nรบmeros
4) Salir
###Markdown
Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla:
###Code
# Iterando sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for nombre in mi_lista:
print(nombre)
num1, num2 = (12,15)
print(num1)
print(num2)
for indice,nombre in enumerate(mi_lista):
print(indice, nombre)
# Modificando valores sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio','Juan']
for indice,nombre in enumerate(mi_lista):
# print(indice, nombre)
if nombre == 'Juan':
mi_lista[indice] = 'Maria'
mi_lista
# valor cambiado
mi_lista
# Iterando sobre diccionarios
dicx = {'key1':1,'key2':2,'key3':3}
for key,value in dicx.items():
if key == 'key1':
dicx[key] = 10
dicx
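# Caution (general Python rule): changing existing values while iterating, as above, is fine,
# but adding or removing keys inside the loop raises a RuntimeError; iterate over a copy instead.
for key in list(dicx.keys()):
    if key == 'key3':
        del dicx[key]
dicx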
dicx.items()
# Iterando sobre strings
texto = 'Hola Mundo'
for i, letra in enumerate(texto):
print(i, letra)
texto= 'Hola Mundo'
texto
texto_s = ''
for l in texto:
if l == 'o':
texto_s += 'x'
continue
texto_s += l
texto_s
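# Shortcut (built-in str method): the same 'o' -> 'x' replacement can be done in one call.
texto.replace('o', 'x')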
texto_s = ''
for l in texto:
texto_s = texto_s + l
texto_s
###Output
_____no_output_____
###Markdown
Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha:
###Code
[*range(5,0,-2)]
# -*- coding: utf-8 -*-
# Generarndo un rango de valores del 2001 al 2012
for anio in range(2001, 2013):
print(f"Informes del Aรฑo {anio}")
# por defecto el inicio de la funcion es 0
for i in range(10):
print(f'#{i}')
###Output
#0
#1
#2
#3
#4
#5
#6
#7
#8
#9
###Markdown
EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas
###Code
# 1. Solicitar la cantidad de numeros a introducir
# 2. Por cada numero a introducir, voy a preguntar por el numero y los almaceno en una lista
# 3. Sumar los numeros de mi lista
# 4. Dividir la suma entre la cantidad
# 5. Mostrar el resultado
cantidad_numeros = int(input('Ingrese la cantidad de numeros a introducir: '))
cantidad_numeros
lista_numeros = []
for i in range(cantidad_numeros):
x = float(input(f'Ingrese el numero {i+1}: '))
lista_numeros.append(x)
lista_numeros
suma_lista = 0
for num in lista_numeros:
suma_lista += num
suma_lista
sum(lista_numeros)
media_aritmetica = suma_lista/cantidad_numeros
print(f'La media Aritmetica de los elementos es {media_aritmetica}')
###Output
La media Aritmetica de los elementos es 14.25
###Markdown
2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4
###Code
# 1. Solicitar la altura dle triangulo
#
'#'*4
###Output
_____no_output_____
###Markdown
###Code
' ' * 0 + '#' * 4
###Output
_____no_output_____
###Markdown
3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. 4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista:
###Code
lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o']
lista_2 = ["h",'o','l','a',' ', 'l','u','n','a']
###Output
_____no_output_____
###Markdown
Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โInformes del Aรฑo aรฑoโ
###Code
# -*- coding: utf-8 -*
anio = 2001
while anio <= 2012:
print("Informes del Aรฑo {}".format(anio))
anio += 1
# anio = anio + 2
# anio += 1
###Output
Informes del Aรฑo 2001
Informes del Aรฑo 2002
Informes del Aรฑo 2003
Informes del Aรฑo 2004
Informes del Aรฑo 2005
Informes del Aรฑo 2006
Informes del Aรฑo 2007
Informes del Aรฑo 2008
Informes del Aรฑo 2009
Informes del Aรฑo 2010
Informes del Aรฑo 2011
Informes del Aรฑo 2012
###Markdown
Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.:
###Code
c = 0
while c <= 5:
c+=1
if c == 4:
print("Rompemos el bucle cuando c vale", c)
break
print("c vale",c)
###Output
c vale 1
c vale 2
c vale 3
Rompemos el bucle cuando c vale 4
###Markdown
- Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle.
###Code
c = 0
while c <= 5:
c+=1
#c = c+1
if c==3 or c==4:
# print("Continuamos con la siguiente iteraciรณn", c)
continue
print("c vale",c)
###Output
c vale 1
c vale 2
c vale 5
c vale 6
###Markdown
Ejemplo Menรบ Interactivo
###Code
### print("Bienvenido al menรบ interactivo")
while(True):
print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir""")
opcion = input()
if opcion == '1':
print("Hola, espero que te lo estรฉs pasando bien")
elif opcion == '2':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print("El resultado de la suma es: ",n1+n2)
elif opcion =='3':
print("ยกHasta luego! Ha sido un placer ayudarte")
break
else:
print("Comando desconocido, vuelve a intentarlo")
###Output
ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir
###Markdown
EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla:
###Code
# Iterando sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for elemento in mi_lista:
print(elemento)
# Modificando valores sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for indice,nombre in enumerate(mi_lista):
if nombre=='Juan':
mi_lista[indice]='Maria'
# valor cambiado
mi_lista
# Iterando sobre diccionarios
dicx = {'val1':1,'val2':2,'val3':3}
for key,val in dicx.items():
print(key,val)
dicx.items
###Output
_____no_output_____
###Markdown
Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha:
###Code
[*range(1,6,2)]
##es una especie de lista
# -*- coding: utf-8 -*-
# Generarndo un rango de valores del 2001 al 2012
for anio in range(2001, 2013):
print("Informes del Aรฑo", str(anio))
# por defecto el inicio de la funcion es 0
for i in range(10):
print(i)
text = 'hola que tal'
for p in text:
    print(p)
###Output
_____no_output_____
###Markdown
EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas
###Code
cantidad = int(input('ingrese la cantidad de numeros a introducir'))
numero = []
for n in range(cantidad):
x = int(input('ingrese el numero a ser sumado: '))
numero.append(x)
sum(numero)/cantidad
###Output
_____no_output_____
###Markdown
2. Write a program that asks the user for an integer and prints a right triangle like the one below, whose height is the number entered. For n = 4
###Code
q=int(input("ingrese la cantidad de #s: "))
# forma 1
for i in range(q):
print('#'*(i+1))
# prueba
'#'*4
# forma 2
nums=[]
v=1
for elem in range(v,q+1):
nums.append(v*("#"))
v=v+1
for b in (nums):
print (b)
###Output
#
##
###
####
###Markdown
3. Write a program that asks the user for an integer and prints whether or not it is a prime number.
###Code
numero = int(input("ingrese un numero entero:"))
numero
primo = True
for n in range(2, numero, 1):
if numero % n ==0:
primo = False
break
# for numero = 7 the loop checks 7 % 2, 7 % 3, 7 % 4, 7 % 5 and 7 % 6; none of them is 0, so 7 is prime
if primo:
print(f'el numero: {numero} es primo')
else:
print(f'el numero: {numero} no es primo')
###Output
el numero: 6 no es primo
###Markdown
4. Given two lists, you must build a third one with every element that appears in both, but no element may be repeated in the new list:
###Code
lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o']
lista_2 = ["h",'o','l','a',' ', 'l','u','n','a']
l1=set(lista_1)
l2=set(lista_2)
l1
l2
l3 = l1.intersection(l2)
list(l3)
###Output
_____no_output_____
###Markdown
Iterative Control Structures Unlike conditional control structures, iterative ones (also called cyclic structures or loops) let us run the same code repeatedly while a condition holds. Python provides two loop structures: - The while loop - The for loop We will look at each in detail below. While Loop------------------------------ It repeats a block based on evaluating a logical condition, as long as it is True. It is up to the programmer to decide when the condition should become False so that the While ends. Example: while the year is less than or equal to 2012, print the phrase "Informes del Año year"
###Code
# -*- coding: utf-8 -*
anio = 2001
while anio <= 2012:
print("Informes del Aรฑo {}".format(str(anio)))
anio += 1
# anio = anio + 2
# anio += 1
###Output
Informes del Aรฑo 2001
Informes del Aรฑo 2002
Informes del Aรฑo 2003
Informes del Aรฑo 2004
Informes del Aรฑo 2005
Informes del Aรฑo 2006
Informes del Aรฑo 2007
Informes del Aรฑo 2008
Informes del Aรฑo 2009
Informes del Aรฑo 2010
Informes del Aรฑo 2011
Informes del Aรฑo 2012
###Markdown
Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.:
###Code
c = 0
while c <= 5:
c+=1
if c == 4:
print("Rompemos el bucle cuando c vale", c)
break
print("c vale",c)
###Output
c vale 1
c vale 2
c vale 3
Rompemos el bucle cuando c vale 4
###Markdown
- Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle.
###Code
c = 0
while c <= 5:
c+=1
if c==3 or c==4:
# print("Continuamos con la siguiente iteraciรณn", c)
continue
print("c vale",c)
###Output
c vale 1
c vale 2
c vale 5
c vale 6
###Markdown
Ejemplo Menรบ Interactivo
###Code
print("Bienvenido al menรบ interactivo")
while(True):
print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir""")
opcion = input()
if opcion == '1':
print("Hola, espero que te lo estรฉs pasando bien")
elif opcion == '2':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print("El resultado de la suma es: ",n1+n2)
elif opcion =='3':
print("ยกHasta luego! Ha sido un placer ayudarte")
break
else:
print("Comando desconocido, vuelve a intentarlo")
###Output
Bienvenido al menรบ interactivo
ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir
###Markdown
EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta
###Code
n1 = float(input("Introduce un nรบmero: ") )
n2 = float(input("Introduce otro nรบmero: ") )
opcion = 0
print("""
ยฟQuรฉ quieres hacer?
1) Sumar los dos nรบmeros
2) Restar los dos nรบmeros
3) Multiplicar los dos nรบmeros
""")
opcion = int(input("Introduce un nรบmero: ") )
if opcion == 1:
print("La suma de",n1,"+",n2,"es",n1+n2)
elif opcion == 2:
print("La resta de",n1,"-",n2,"es",n1-n2)
elif opcion == 3:
print("El producto de",n1,"*",n2,"es",n1*n2)
else:
print("Opciรณn incorrecta")
###Output
_____no_output_____
###Markdown
Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla:
###Code
# Iterando sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for elemento in mi_lista:
print(elemento)
# Modificando valores sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for indice,nombre in enumerate(mi_lista):
if nombre=='Juan':
mi_lista[indice]='Maria'
# valor cambiado
mi_lista
# Iterando sobre diccionarios
dicx = {'val1':1,'val2':2,'val3':3}
for key,val in dicx.items():
print(key,val)
###Output
val1 1
val2 2
val3 3
###Markdown
Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha:
###Code
# -*- coding: utf-8 -*-
# Generarndo un rango de valores del 2001 al 2012
for anio in range(2001, 2013):
print("Informes del Aรฑo", str(anio))
# por defecto el inicio de la funcion es 0
for i in range(10):
print(i)
###Output
0
1
2
3
4
5
6
7
8
9
###Markdown
EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas
###Code
suma = 0
numeros = int(input("ยฟCuรกntos nรบmeros quieres introducir? ") )
for x in range(numeros):
suma += float(input("Introduce un nรบmero: ") )
print("Se han introducido", numeros, "nรบmeros que en total han sumado",
suma, "y la media es", suma/numeros)
###Output
ยฟCuรกntos nรบmeros quieres introducir? 3
Introduce un nรบmero: 1
Introduce un nรบmero: 2
Introduce un nรบmero: 3
###Markdown
2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4
###Code
m=int(input("Ingrese un numero entero: "))
i=1
while i<=m:
print("#"*i)
i+=1
###Output
Ingrese un numero entero: 1
###Markdown
3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no.
###Code
n = int(input("Introduce un nรบmero entero positivo mayor que 2: "))
i = 2
while n % i != 0:
i += 1
if i == n:
print(str(n) + " es primo")
else:
print(str(n) + " no es primo")
###Output
_____no_output_____
###Markdown
4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista:
###Code
lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o']
lista_2 = ["h",'o','l','a',' ', 'l','u','n','a']
###Output
_____no_output_____
###Markdown
Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โInformes del Aรฑo aรฑoโ
###Code
# -*- coding: utf-8 -*
anio = 2001
while anio <= 2012:
print("Informes del Aรฑo {}".format(anio))
anio += 1
# anio = anio + 2
# anio += 1
###Output
Informes del Aรฑo 2001
Informes del Aรฑo 2002
Informes del Aรฑo 2003
Informes del Aรฑo 2004
Informes del Aรฑo 2005
Informes del Aรฑo 2006
Informes del Aรฑo 2007
Informes del Aรฑo 2008
Informes del Aรฑo 2009
Informes del Aรฑo 2010
Informes del Aรฑo 2011
Informes del Aรฑo 2012
###Markdown
Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.:
###Code
c = 0
while c <= 5:
c+=1
if c == 4:
print("Rompemos el bucle cuando c vale", c)
break
print("c vale",c)
###Output
c vale 1
c vale 2
c vale 3
Rompemos el bucle cuando c vale 4
###Markdown
- Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle.
###Code
c = 0
while c <= 5:
c+=1
if c==3 or c==4:
print("Continuamos con la siguiente iteraciรณn", c)
continue
print("c vale",c)
###Output
c vale 1
c vale 2
Continuamos con la siguiente iteraciรณn 3
Continuamos con la siguiente iteraciรณn 4
c vale 5
c vale 6
###Markdown
Ejemplo Menรบ Interactivo
###Code
print("Bienvenido al menรบ interactivo")
while True:
print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir""")
opcion = input()
if opcion == '1':
print("Hola, espero que te lo estรฉs pasando bien")
elif opcion == '2':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print("El resultado de la suma es: ",n1+n2)
elif opcion =='3':
print("ยกHasta luego! Ha sido un placer ayudarte")
break
else:
print("Comando desconocido, vuelve a intentarlo")
###Output
Bienvenido al menรบ interactivo
ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir
###Markdown
EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta
###Code
print("Bienvenido")
a = int(input("Ingrese primer nรบmero: "))
b = int(input("Ingrese segundo nรบmero: "))
while True:
print("""Elija un opciรณn
1. Sumar dos numeros
2. Restar dos numeros
3. Multiplicar dos numeros
4. Salir
""")
opc = input()
if opc == '1':
respuesta = a + b
print("La suma es: ",respuesta)
elif opc == '2':
respuesta = a - b
print("La Resta es: ",respuesta)
elif opc == '3':
respuesta = a * b
print("La Multiplicacion es: ",respuesta)
elif opc == '4':
print("Hasta luego")
break
else:
print("Ingrese un numero de la lista")
while True:
a = input("Ingrese primer nรบmero: ")
try:
a = float(a)
break
except:
print('dato ingresado no es un numero')
a
###Output
_____no_output_____
###Markdown
Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla:
###Code
# Iterando sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for elemento in mi_lista:
print(elemento)
# Modificando valores sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for indice,nombre in enumerate(mi_lista):
if nombre=='Juan':
mi_lista[indice]='Maria'
# valor cambiado
mi_lista
# Iterando sobre diccionarios
dicx = {'val1':1,'val2':2,'val3':3}
for key,val in dicx.items():
print(key,val)
cadena = 'hola mundo'
for l in cadena:
print(l)
###Output
h
o
l
a
m
u
n
d
o
###Markdown
Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha:
###Code
# -*- coding: utf-8 -*-
# Generarndo un rango de valores del 2001 al 2012
for anio in range(2001, 2013):
print("Informes del Aรฑo", str(anio))
# por defecto el inicio de la funcion es 0
for i in range(10):
print(i)
###Output
0
1
2
3
4
5
6
7
8
9
###Markdown
EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas
###Code
cantidad = int(input('Ingrese la cantidad de numeros a ingresar: '))
lista_num = []
for i in range(cantidad):
num = float(input(f'Ingrese el numero {i+1}: '))
lista_num.append(num)
#lista_num
len(lista_num) # cantidad de elementos
suma = 0
for e in lista_num:
suma = suma + e
suma
media_aritmetica = suma / cantidad
media_aritmetica
# forma 2
sum(lista_num)/cantidad
# forma 3
suma = 0
for i in range(cantidad):
num = float(input(f'Ingrese el numero {i+1}: '))
suma = suma + num
pass # termina bucle
print('el promedio es: ',suma/cantidad)
# forma 4
i= 1
suma = 0
while i <= cantidad:
num = float(input(f'Ingrese el numero {i}: '))
suma = suma + num
i +=1
print('el promedio es: ',suma/cantidad)
###Output
Ingrese el numero 1: 8
Ingrese el numero 2: 10
Ingrese el numero 3: 4
Ingrese el numero 4: 1
###Markdown
2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4
###Code
n=4
'#'*5
###Output
_____no_output_____
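###Markdown
The cell above only experiments with string repetition; one possible way to complete exercise 2 in the same spirit:
###Code
# possible sketch for exercise 2: print one row per level using string repetition
n = int(input("Introduce la altura del triángulo: "))
for i in range(1, n + 1):
    print('#' * i)
###Output
_____no_output_____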
###Markdown
3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no. 4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista:
###Code
lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o']
lista_2 = ["h",'o','l','a',' ', 'l','u','n','a']
###Output
_____no_output_____
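###Markdown
Exercise 3 is left without a solution in this copy; one possible sketch using the same trial-division idea as the other solutions in this document:
###Code
# possible sketch for exercise 3: trial division from 2 up to numero - 1
numero = int(input("Introduce un número entero: "))
es_primo = numero >= 2
for i in range(2, numero):
    if numero % i == 0:
        es_primo = False
        break
print(numero, "es primo" if es_primo else "no es primo")
###Output
_____no_output_____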
###Markdown
Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โInformes del Aรฑo aรฑoโ
###Code
# -*- coding: utf-8 -*
anio = 2001
while anio <= 2012:
print("Informes del Aรฑo {}".format(str(anio)))
anio += 1
# anio = anio + 2
# anio += 1
###Output
Informes del Aรฑo 2001
Informes del Aรฑo 2002
Informes del Aรฑo 2003
Informes del Aรฑo 2004
Informes del Aรฑo 2005
Informes del Aรฑo 2006
Informes del Aรฑo 2007
Informes del Aรฑo 2008
Informes del Aรฑo 2009
Informes del Aรฑo 2010
Informes del Aรฑo 2011
Informes del Aรฑo 2012
###Markdown
Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.:
###Code
c = 0
while c <= 5:
c+=1
if c == 4:
print("Rompemos el bucle cuando c vale", c)
break
print("c vale",c)
###Output
c vale 1
c vale 2
c vale 3
Rompemos el bucle cuando c vale 4
###Markdown
- Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle.
###Code
c = 0
while c <= 5:
c+=1
if c==3 or c==4:
# print("Continuamos con la siguiente iteraciรณn", c)
continue
print("c vale",c)
###Output
c vale 1
c vale 2
c vale 5
c vale 6
###Markdown
Ejemplo Menรบ Interactivo
###Code
print("Bienvenido al menรบ interactivo")
while(True):
print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir""")
opcion = input()
if opcion == '1':
print("Hola, espero que te lo estรฉs pasando bien")
elif opcion == '2':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print("El resultado de la suma es: ",n1+n2)
elif opcion =='3':
print("ยกHasta luego! Ha sido un placer ayudarte")
break
else:
print("Comando desconocido, vuelve a intentarlo")
###Output
Bienvenido al menรบ interactivo
ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir
###Markdown
EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta
###Code
num1 = int(input('Ingresa el primer nรบmero: '))
num2 = int(input('Ingresa el segundo nรบmero: '))
while (True):
print("""ยฟQuรฉ es lo que quieres hacer?
1) Mostrar una suma de los dos nรบmeros
2) Mostrar una resta de los dos nรบmeros
3) Mostrar una multiplicaciรณn de los dos nรบmeros
4) Salir""")
opcion = int(input())
if opcion == 1:
print(num1 + num2)
elif opcion == 2:
print(num1-num2)
elif opcion == 3:
print(num1*num2)
elif opcion == 4:
print('Hasta luego')
break
else:
print('Opciรณn invalida, vuelva a intentarlo')
###Output
Ingresa el primer nรบmero: 15
Ingresa el segundo nรบmero: 2
###Markdown
Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla:
###Code
# Iterando sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for elemento in mi_lista:
print(elemento)
# Modificando valores sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for indice,nombre in enumerate(mi_lista):
if nombre=='Juan':
mi_lista[indice]='Maria'
# valor cambiado
mi_lista
# Iterando sobre diccionarios
dicx = {'val1':1,'val2':2,'val3':3}
for key,val in dicx.items():
print(key,val)
###Output
val1 1
val2 2
val3 3
###Markdown
Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha:
###Code
# -*- coding: utf-8 -*-
# Generarndo un rango de valores del 2001 al 2012
for anio in range(2001, 2013):
print("Informes del Aรฑo", str(anio))
# por defecto el inicio de la funcion es 0
for i in range(10):
print(i)
###Output
0
1
2
3
4
5
6
7
8
9
###Markdown
EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas
###Code
cantidad = int(input('ยฟCuรกntos nรบmeros desea introducir en la lista?'))
i = 0
lista = []
suma = 0
while(i < cantidad):
num = int(input(f'Ingresa el nรบmero {i+1}'))
lista.append(num)
i+=1
for indice, numero in enumerate(lista):
suma += numero
if indice == cantidad - 1:
print(f'La media aritmรฉtica es {suma/cantidad}')
break
#forma rรกpida
cantidad = int(input('Ingresa la cantidad de nรบmeros a introducir:' ))
suma = 0
for i in range(cantidad):
num = float(input(f'Ingresa el nรบmero {i+1}: '))
suma += num
print(f'La media aritmรฉtica es: {suma/cantidad}')
###Output
Ingresa la cantidad de nรบmeros a introducir: 4
Ingresa el nรบmero 1: 4
Ingresa el nรบmero 2: 3
Ingresa el nรบmero 3: 2
Ingresa el nรบmero 4: 1
###Markdown
2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4
###Code
tamanio = int(input('Introduce la altura del triรกngulo rectรกngulo: '))
for num in range(1,tamanio+1):
print('#'*num)
###Output
Introduce la altura del triรกngulo rectรกngulo: 4
###Markdown
3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no.
###Code
numero = int(input('Ingresa un nรบmero entero: '))
if numero == 2:
print('El nรบmero es primo')
elif numero > 2:
for i in range(2,numero):
if numero%i == 0:
print('El nรบmero no es primo')
break
else:
print('El nรบmero es primo')
break
else:
print('El nรบmero no es primo')
###Output
Ingresa un nรบmero entero: 7
###Markdown
4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista:
###Code
lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o']
lista_2 = ["h",'o','l','a',' ', 'l','u','n','a']
conjunto_1 = set(lista_1)
conjunto_2 = set(lista_2)
conjunto_3 = conjunto_1.intersection(conjunto_2)
lista_3 = list(conjunto_3)
print(lista_3)
###Output
[' ', 'h', 'o', 'a', 'l', 'n', 'u']
###Markdown
Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โInformes del Aรฑo aรฑoโ
###Code
# -*- coding: utf-8 -*
anio = 2001
while anio <= 2012:
print("Informes del Aรฑo {}".format(str(anio)))
anio += 1
# anio = anio + 2
# anio += 1
###Output
Informes del Aรฑo 2001
Informes del Aรฑo 2002
Informes del Aรฑo 2003
Informes del Aรฑo 2004
Informes del Aรฑo 2005
Informes del Aรฑo 2006
Informes del Aรฑo 2007
Informes del Aรฑo 2008
Informes del Aรฑo 2009
Informes del Aรฑo 2010
Informes del Aรฑo 2011
Informes del Aรฑo 2012
###Markdown
Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento. No se ejecutarรก el Else, ya que รฉste sรณlo se llama al finalizar la iteraciรณn.:
###Code
c = 0
while c <= 5:
c+=1
if c == 4:
print("Rompemos el bucle cuando c vale", c)
break
print("c vale",c)
###Output
c vale 1
c vale 2
c vale 3
Rompemos el bucle cuando c vale 4
###Markdown
- Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle.
###Code
c = 0
while c <= 5:
c+=1
if c==3 or c==4:
print("Continuamos con la siguiente iteraciรณn", c)
continue
print("c vale",c)
c = 0
while c <= 5:
c+=1
if c==3 or c==4:
# print("Continuamos con la siguiente iteraciรณn", c)
continue
print("c vale",c)
###Output
c vale 1
c vale 2
c vale 5
c vale 6
###Markdown
Ejemplo Menรบ Interactivo
###Code
print("Bienvenido al menรบ interactivo")
while(True):
print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir""")
opcion = input()
if opcion == '1':
print("Hola, espero que te lo estรฉs pasando bien")
elif opcion == '2':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print("El resultado de la suma es: ",n1+n2)
elif opcion =='3':
print("ยกHasta luego! Ha sido un placer ayudarte")
break
else:
print("Comando desconocido, vuelve a intentarlo")
###Output
Bienvenido al menรบ interactivo
ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir
###Markdown
EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta
###Code
print("Bienvenido")
a = int(input("Ingrese primer nรบmero: "))
b = int(input("Ingrese segundo nรบmero: "))
while True:
print("""Elija un opciรณn
1. Sumar dos numeros
2. Restar dos numeros
3. Multiplicar dos numeros
4. Salir
""")
opc = input()
if opc == '1':
respuesta = a + b
print("La suma es: ",respuesta)
elif opc == '2':
respuesta = a - b
print("La Resta es: ",respuesta)
elif opc == '3':
respuesta = a * b
print("La Multiplicacion es: ",respuesta)
elif opc == '4':
print("Hasta luego")
break
else:
print("Ingrese un numero de la lista")
while True:
a = input("Ingrese primer nรบmero: ")
try:
a = float(a)
break
except:
print('dato ingresado no es un numero')
a
###Output
_____no_output_____
###Markdown
Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla:
###Code
# Iterando sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for elemento in mi_lista:
print(elemento)
# Modificando valores sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for indice,nombre in enumerate(mi_lista):
if nombre=='Juan':
mi_lista[indice]='Maria'
# valor cambiado
mi_lista
# Iterando sobre diccionarios
dicx = {'val1':1,'val2':2,'val3':3}
for key,val in dicx.items():
print(key,val)
cadena = 'hola mundo'
for l in cadena:
print(l)
###Output
h
o
l
a
m
u
n
d
o
###Markdown
Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha:
###Code
# -*- coding: utf-8 -*-
# Generarndo un rango de valores del 2001 al 2012
for anio in range(2001, 2013):
print("Informes del Aรฑo", str(anio))
# por defecto el inicio de la funcion es 0
for i in range(10):
print(i)
###Output
0
1
2
3
4
5
6
7
8
9
###Markdown
EJERCICIOS 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas
###Code
cantidad = int(input ('ingrese la cantidad de numeros que sea ingresar: '))
lista_num = []
for i in range(cantidad):
num = float(input(f'Ingrese el numero {i+1}: '))
lista_num.append(num)
#lista_num
len(lista_num) # cantidad de elementos
suma = 0
for e in lista_num:
suma = suma + e
suma
media_aritmetica = suma / cantidad
media_aritmetica
# forma 2
sum(lista_num)/cantidad
# forma 3
suma = 0
for i in range(cantidad):
num = float(input(f'Ingrese el numero {i+1}: '))
suma = suma + num
pass # termina bucle
print('el promedio es: ',suma/cantidad)
# forma 4
i= 1
suma = 0
while i <= cantidad:
num = float(input(f'Ingrese el numero {i}: '))
suma = suma + num
i +=1
print('el promedio es: ',suma/cantidad)
###Output
Ingrese el numero 1: 2
Ingrese el numero 2: 3
Ingrese el numero 3: 4
Ingrese el numero 4: 5
el promedio es: 3.5
###Markdown
2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4
###Code
numero = int(input ('ingresar numero entero: '))
for i in range(numero):
for j in range(i+1):
print("#", end="")
print("")
###Output
ingresar numero entero: 4
#
##
###
####
###Markdown
3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no.
###Code
numero = int(input("Introduce un nรบmero entero positivo: "))
for i in range( 2, numero):
if numero % i == 0:
break
if (i + 1) == numero:
print(str(numero) + " es primo")
else:
print(str(numero) + " no es primo")
###Output
Introduce un nรบmero entero positivo: 4
4 no es primo
###Markdown
4. Dadas dos listas, debes generar una tercera con todos los elementos que se repitan en ellas, pero no debe repetirse ningรบn elemento en la nueva lista:
###Code
lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o']
lista_2 = ["h",'o','l','a',' ', 'l','u','n','a']
lista_1 = ["h",'o','l','a',' ', 'm','u','n','d','o']
lista_2 = ["h",'o','l','a',' ', 'l','u','n','a']
lista_3 = []
for letra in lista_1:
if letra in lista_2 and letra not in lista_3:
lista_3.append(letra)
print(lista_3)
###Output
['h', 'o', 'l', 'a', ' ', 'u', 'n']
###Markdown
Estructuras de Control Iterativas A diferencia de las estructuras de control condicionales, las iterativas (tambiรฉn llamadas cรญclicas o bucles), nos permiten ejecutar un mismo cรณdigo, de manera repetida, mientras se cumpla una condiciรณn. En Python se dispone de dos estructuras cรญclicas: - El bucle while - El bucle for Las veremos en detalle a continuaciรณn. Bucle While------------------------------ Se basa en repetir un bloque a partir de evaluar una condiciรณn lรณgica, siempre que รฉsta sea True. Queda en las manos del programador decidir el momento en que la condiciรณn cambie a False para hacer que el While finalice. EjemploMientras que aรฑo sea menor o igual a 2012, imprimir la frase โInformes del Aรฑo aรฑoโ
###Code
# -*- coding: utf-8 -*
# para el aรฑo 2008,2009,2010 no debo entregar informe
# si el aรฑo es 2007 parar el bucle
anio = 2001
while anio <= 2012:
print("Informes del Aรฑo {}".format(anio))
anio += 1
# anio = anio + 1
# anio += 1
###Output
Informes del Aรฑo 2001
Informes del Aรฑo 2002
Informes del Aรฑo 2003
Informes del Aรฑo 2004
Informes del Aรฑo 2005
Informes del Aรฑo 2006
Informes del Aรฑo 2007
Informes del Aรฑo 2008
Informes del Aรฑo 2009
Informes del Aรฑo 2010
Informes del Aรฑo 2011
Informes del Aรฑo 2012
###Markdown
Si miras la รบltima lรญnea:anio += 1Podrรกs notar que en cada iteraciรณn, incrementamos el valor de la variable que condiciona el bucle (anio). Si no lo hiciรฉramos, esta variable siempre serรญa igual a 2001 y el bucle se ejecutarรญa de forma infinita, ya que la condiciรณn (anio <= 2012) siempre se estarรญa cumpliendo. - Instruccion break Sirve para "romper" la ejecuciรณn del While en cualquier momento.
###Code
c = 0
while c <= 5:
print("c vale ",c)
if c == 4:
print("Rompemos el bucle cuando c vale ", c)
break
c+=1
print("Bucle finalizado !!! ")
###Output
c vale 0
c vale 1
c vale 2
c vale 3
c vale 4
Rompemos el bucle cuando c vale 4
Bucle finalizado !!!
###Markdown
- Instruccion continue Sirve para "saltarse" la iteraciรณn actual sin romper el bucle.
###Code
c = 0
while c <= 5:
c+=1
if c==3 or c==4:
print("Continuamos con la siguiente iteraciรณn", c)
continue
print("c vale",c)
###Output
c vale 1
c vale 2
Continuamos con la siguiente iteraciรณn 3
Continuamos con la siguiente iteraciรณn 4
c vale 5
c vale 6
###Markdown
Ejemplo Menรบ Interactivo
###Code
print("Bienvenido al menรบ interactivo")
#bandera = True
while True:
print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir""")
opcion = input() # me devuelve un string ''
if opcion == '1':
print("Hola, espero que te lo estรฉs pasando bien")
elif opcion == '2':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print(f"El resultado de la suma es: {n1+n2}")
elif opcion =='3':
print("ยกHasta luego! Ha sido un placer ayudarte")
#bandera = False
break
else:
print("Comando desconocido, vuelve a intentarlo")
###Output
Bienvenido al menรบ interactivo
ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Saludar
2) Sumar dos nรบmeros
3) Salir
###Markdown
EJERCICIOS Realiza un programa que lea dos nรบmeros por teclado y permita elegir entre 3 opciones en un menรบ:- Mostrar una suma de los dos nรบmeros- Mostrar una resta de los dos nรบmeros (el primero menos el segundo)- Mostrar una multiplicaciรณn de los dos nรบmerosEn caso de introducir una opciรณn invรกlida, el programa informarรก de que no es correcta
###Code
print("Bienvenido al menรบ interactivo")
while True:
print("""ยฟQuรฉ quieres hacer? Escribe una opciรณn
1) Sumar de nรบmeros
2) Resta de nรบmeros
3) Multiplicacion de nรบmeros
4) Salir""")
opcion = input() # me devuelve un string ''
if opcion == '1':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print(f"El resultado de la suma es: {n1+n2}")
elif opcion == '2':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print(f"El resultado de la resta es: {n1-n2}")
elif opcion =='3':
n1 = float(input("Introduce el primer nรบmero: "))
n2 = float(input("Introduce el segundo nรบmero: "))
print(f"El resultado de la multiplicacion es: {n1*n2}")
elif opcion =='4':
print("Gracias por usar la aplicacion")
break
else:
print("Comando desconocido, vuelve a intentarlo")
###Output
_____no_output_____
###Markdown
Bucle For---------------------------------------------- El bucle for, en Python, es aquel que nos permitirรก iterar sobre una variable compleja, del tipo lista o tupla:
###Code
# Iterando sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
for nombre in mi_lista:
print(nombre)
num1, num2 = [12,15]
print(num1)
print(num2)
for i, nombre in enumerate(mi_lista):
print(i, nombre)
# Modificando valores sobre listas
mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio','Juan']
for indice,nombre in enumerate(mi_lista):
# print(indice, nombre)
if nombre == 'Juan':
mi_lista[indice] = 'Maria'
print(mi_lista)
# valor cambiado
mi_lista
# Iterando sobre diccionarios
dicx = {'key1':1,'key2':2,'key3':3}
for key,value in dicx.items():
if key == 'key1':
dicx[key] = 10
dicx
dicx.items()
# dicx['key1'] = 10
# Iterando sobre strings
texto = 'Hola Mundo'
for i, letra in enumerate(texto):
print(i, letra)
texto= 'Hola Mundo'
texto
texto[1]
texto_s = ''
for l in texto:
texto_s += l
texto_s
texto_s = ''
for l in texto:
if l == 'o':
texto_s = texto_s + 'x'
continue
texto_s = texto_s + l
print(texto_s)
texto_s
###Output
H
Hxl
Hxla
Hxla
Hxla M
Hxla Mu
Hxla Mun
Hxla Mund
###Markdown
Funcion Range Sirve para generar una lista de nรบmeros que podemos recorrer fรกcilmente, pero no ocupa memoria porque se interpreta sobre la marcha:
###Code
# range -> no es un lista
# si quiero convetir el range a una lista debo hacer
[*range(11)]
[*range(2012, 2000, -1)]
# -*- coding: utf-8 -*-
# Generarndo un rango de valores del 2001 al 2012
for anio in range(2001, 2013):
print(f"Informes del Aรฑo {anio}")
# por defecto el inicio de la funcion es 0
for i in range(10):
print(f'#{i}')
# iterating over an explicit list of years
anios = [2001, 2002, 2003, 2004, 2005, 2006]
for anio in anios:
    print(f"Informes del Año {anio}")
###Output
_____no_output_____
###Markdown
EJERCICIOS----------------------------- 1. Realiza un programa que pida al usuario cuantos nรบmeros quiere introducir. Luego lee todos los nรบmeros y realiza una media aritmรฉtica. NotaUsar listas
###Code
# 1. Solicitar la cantidad de nรบmeros a introducir al usuario
# 2. Por cada nรบmero a introducir, solicito el numero
cantidad = int(input("Ingrese la cantidad de nรบmeros a introducir: "))
cantidad
lista_numeros = []
for i in range(cantidad):
msg = "ingrese el numero {}".format(i +1)
x = int(input(msg))
# agregando el numero a la lista
lista_numeros.append(x)
lista_numeros
# para cada nรบmero -> realizo la suma de todos los numeros
sumatoria = 0
for numero in lista_numeros:
sumatoria += numero
print(sumatoria)
sumatoria / cantidad
###Output
_____no_output_____
###Markdown
2. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla un triรกngulo rectรกngulo como el de mรกs abajo, de altura el nรบmero introducido. Para n = 4
###Code
h = int(input("Introduce la altura del triangulo: "))
h
for i in range(1, h+1):
print("#" * i)
"#" * 0
###Output
_____no_output_____
###Markdown
###Code
h = int(input("Introduce la altura del triangulo: "))
h
for i in range(1, h+1):
print(" " * (h - i) + "#" * i)
" #"
" ##"
" " * 3 + "#" * 1
" " * 2 + "#" * 2
" " * 1 + "#" * 3
" " * 0 + "#" * 4
###Output
_____no_output_____
###Markdown
3. Escribir un programa que pida al usuario un nรบmero entero y muestre por pantalla si es un nรบmero primo o no.
###Code
numero = int(input("Introduce la altura del triangulo: "))
numero
primo = True
for i in range(2, numero):
if numero % i ==0 :
primo = False
if primo:
print(f"el numero {numero} es primo")
else:
print(f"el numero {numero} NO es primo")
###Output
el numero 9 NO es primo
|
docs/notebooks/dispersion/two_fluid_dispersion.ipynb | ###Markdown
Dispersion: A Full Two Fluid Solution[tfds]: ../../api/plasmapy.dispersion.two_fluid_dispersion.two_fluid_dispersion_solution.rst[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856[stringer1963]: https://doi.org/10.1088/0368-3281/5/2/304This notebook walks through the functionalty of the [two_fluid_dispersion_solution()][tfds] function. This function computes the wave frequencies for given wavenumbers and plasma parameters based on the analytical solution presented by [Bellan 2012][bellan2012] to the [Stringer 1963][stringer1963] two fluid dispersion relation. The two fluid dispersion equaiton assumes a uniform magnetic field, a zero D.C. electric field, and low-frequency waves $\omega / k c \ll 1$ which equates to$$ \left( \cos^2 \theta - Q \frac{\omega^2}{k^2 {v_A}^2} \right) \left[ \left( \cos^2 \theta - \frac{\omega^2}{k^2 {c_s}^2} \right) - Q \frac{\omega^2}{k^2 {v_A}^2} \left( 1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \right] \\ = \left(1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \frac{\omega^2}{{\omega_{ci}}^2} \cos^2 \theta$$where$$Q = 1 + k^2 c^2/{\omega_{pe}}^2$$$$\cos \theta = \frac{k_z}{k}$$$$\mathbf{B_o} = B_{o} \mathbf{\hat{z}}$$$\omega$ is the wave frequency, $k$ is the wavenumber, $v_A$ is the Alfvรฉn velocity, $c_s$ is the sound speed, $\omega_{ci}$ is the ion gyrofrequency, and $\omega_{pe}$ is the electron plasma frequency.The approach outlined in Section 5 of [Bellan 2012][bellan2012] produces exact roots to the above dispersion equation for all three modes (fast, acoustic, and Alfvรฉn) without having to make additional approximations. The following dispersion relation is what the [two_fluid_dispersion_solution()][tfds] function computes.$$ \frac{\omega}{\omega_{ci}} = \sqrt{ 2 \Lambda \sqrt{-\frac{P}{3}} \cos\left( \frac{1}{3} \cos^{-1}\left( \frac{3q}{2p} \sqrt{-\frac{3}{p}} \right) - \frac{2 \pi}{3}j \right) + \frac{\Lambda A}{3} }$$where $j = 0$ represents the fast mode, $j = 1$ represents the Alfvรฉn mode, and $j = 2$ represents the acoustic mode. Additionally,$$p = \frac{3B-A^2}{3} \; , \; q = \frac{9AB-2A^3-27C}{27}$$$$A = \frac{Q + Q^2 \beta + Q \alpha + \alpha \Lambda}{Q^2} \; , \; B = \alpha \frac{1 + 2 Q \beta + \Lambda \beta}{Q^2} \; , \; C = \frac{\alpha^2 \beta}{Q^2}$$$$\alpha = \cos^2 \theta \; , \; \beta = \left( \frac{c_s}{v_A}\right)^2 \; , \; \Lambda = \left( \frac{k v_{A}}{\omega_{ci}}\right)^2$$ Contents:1. [Wave Propagating at 45 Degrees](Wave-Propagating-at-45-Degrees)2. [Wave frequencies on the k-theta plane](Wave-frequencies-on-the-k-theta-plane)3. [Reproduce Figure 1 from Bellan 2012](Reproduce-Figure-1-from-Bellan-2012)
###Code
%matplotlib inline
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
from astropy.constants.si import c
from matplotlib import colors
from matplotlib.ticker import MultipleLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
from plasmapy.dispersion.two_fluid_dispersion import two_fluid_dispersion_solution
from plasmapy.formulary import parameters as pfp
from plasmapy.particles import Particle
plt.rcParams["figure.figsize"] = [10.5, 0.56 * 10.5]
###Output
_____no_output_____
###Markdown
Wave Propagating at 45 DegreesBelow we define the required parameters to compute the wave frequencies.
###Code
# define input parameters
inputs = {
"k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m,
"theta": 45 * u.deg,
"n_i": 5 * u.cm ** -3,
"B": 8.3e-9 * u.T,
"T_e": 1.6e6 * u.K,
"T_i": 4.0e5 * u.K,
"ion": Particle("p+"),
}
# a few useful plasma parameters
params = {
"n_e": inputs["n_i"] * abs(inputs["ion"].charge_number),
"cs": pfp.ion_sound_speed(inputs["T_e"], inputs["T_i"], inputs["ion"],),
"va": pfp.Alfven_speed(inputs["B"], inputs["n_i"], ion=inputs["ion"],),
"wci": pfp.gyrofrequency(inputs["B"], inputs["ion"]),
}
params["lpe"] = pfp.inertial_length(params["n_e"], "e-")
params["wpe"] = pfp.plasma_frequency(params["n_e"], "e-")
###Output
_____no_output_____
###Markdown
The computed wave frequencies ($rad/s$) are returned in a dictionary with keys representing the wave modes and the values being an Astropy [Quantity](https://docs.astropy.org/en/stable/api/astropy.units.Quantity.htmlastropy.units.Quantity). Since our inputs had a scalar $\theta$ and a 1D array of $k$'s, the computed wave frequencies will be a 1D array of size equal to the size of the $k$ array.
###Code
# compute
omegas = two_fluid_dispersion_solution(**inputs)
(list(omegas.keys()), omegas["fast_mode"], omegas["fast_mode"].shape)
###Output
_____no_output_____
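###Markdown
To connect the returned frequencies back to the algebra quoted in the introduction, the cell below is a minimal NumPy transcription of the closed-form roots (the alpha, beta, Lambda, Q, A, B, C, p and q expressions). The helper name bellan_roots and the plain-float SI interface are choices made here for illustration only; the packaged function above additionally handles units, array broadcasting and input validation.
###Code
import numpy as np
def bellan_roots(k, theta, v_A, c_s, omega_ci, omega_pe, c_light=2.998e8):
    """Closed-form two fluid roots as written above; scalar SI inputs, theta in radians."""
    alpha = np.cos(theta) ** 2
    beta = (c_s / v_A) ** 2
    Lam = (k * v_A / omega_ci) ** 2
    Q = 1 + (k * c_light / omega_pe) ** 2
    A = (Q + Q**2 * beta + Q * alpha + alpha * Lam) / Q**2
    B = alpha * (1 + 2 * Q * beta + Lam * beta) / Q**2
    C = alpha**2 * beta / Q**2
    p = (3 * B - A**2) / 3
    q = (9 * A * B - 2 * A**3 - 27 * C) / 27
    arg = np.arccos(3 * q / (2 * p) * np.sqrt(-3 / p)) / 3
    return {
        key: omega_ci * np.sqrt(
            2 * Lam * np.sqrt(-p / 3) * np.cos(arg - 2 * np.pi * j / 3) + Lam * A / 3
        )
        for j, key in enumerate(("fast_mode", "alfven_mode", "acoustic_mode"))
    }
###Output
_____no_output_____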
###Markdown
Let's plot the results of each wave mode.
###Code
fs = 14 # default font size
figwidth, figheight = plt.rcParams["figure.figsize"]
figheight = 1.6 * figheight
fig = plt.figure(figsize=[figwidth, figheight])
# normalize data
k_prime = inputs["k"] * params["lpe"]
# plot
plt.plot(
k_prime, np.real(omegas["fast_mode"] / params["wpe"]), "r.", ms=1, label="Fast",
)
ax = plt.gca()
ax.plot(
k_prime, np.real(omegas["alfven_mode"] / params["wpe"]), "b.", ms=1, label="Alfvรจn",
)
ax.plot(
k_prime,
np.real(omegas["acoustic_mode"] / params["wpe"]),
"g.",
ms=1,
label="Acoustic",
)
# adjust axes
ax.set_xlabel(r"$kc / \omega_{pe}$", fontsize=fs)
ax.set_ylabel(r"$Re(\omega / \omega_{pe})$", fontsize=fs)
ax.set_yscale("log")
ax.set_xscale("log")
ax.set_ylim(1e-6, 2e-2)
ax.tick_params(
which="both", direction="in", width=1, labelsize=fs, right=True, length=5,
)
# annotate
text = (
f"$v_A/c_s = {params['va'] / params['cs']:.1f} \qquad "
f"c/v_A = 10^{np.log10(c / params['va']):.0f} \qquad "
f"\\theta = {inputs['theta'].value:.0f}"
"^{\\circ}$"
)
ax.text(0.25, 0.95, text, transform=ax.transAxes, fontsize=18)
ax.legend(loc="upper left", markerscale=5, fontsize=fs)
###Output
_____no_output_____
###Markdown
Wave frequencies on the k-theta planeLet us now look at the distribution of $\omega$ on a $k$-$\theta$ plane.
###Code
# define input parameters
inputs = {
"k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m,
"theta": np.linspace(5, 85, 100) * u.deg,
"n_i": 5 * u.cm ** -3,
"B": 8.3e-9 * u.T,
"T_e": 1.6e6 * u.K,
"T_i": 4.0e5 * u.K,
"ion": Particle("p+"),
}
# a few useful plasma parameters
params = {
"n_e": inputs["n_i"] * abs(inputs["ion"].charge_number),
"cs": pfp.ion_sound_speed(inputs["T_e"], inputs["T_i"], inputs["ion"],),
"va": pfp.Alfven_speed(inputs["B"], inputs["n_i"], ion=inputs["ion"],),
"wci": pfp.gyrofrequency(inputs["B"], inputs["ion"]),
}
params["lpe"] = pfp.inertial_length(params["n_e"], "e-")
params["wpe"] = pfp.plasma_frequency(params["n_e"], "e-")
###Output
_____no_output_____
###Markdown
Since the $\theta$ and $k$ values are now 1-D arrays, the returned wave frequencies will be 2-D arrays with the first dimension matching the size of $k$ and the second dimension matching the size of $\theta$.
###Code
# compute
omegas = two_fluid_dispersion_solution(**inputs)
(
omegas["fast_mode"].shape,
omegas["fast_mode"].shape[0] == inputs["k"].size,
omegas["fast_mode"].shape[1] == inputs["theta"].size,
)
###Output
_____no_output_____
###Markdown
Let's plot (the fast mode)!
###Code
fs = 14 # default font size
figwidth, figheight = plt.rcParams["figure.figsize"]
figheight = 1.6 * figheight
fig = plt.figure(figsize=[figwidth, figheight])
# normalize data
k_prime = inputs["k"] * params["lpe"]
zdata = np.transpose(np.real(omegas["fast_mode"].value)) / params["wpe"].value
# plot
im = plt.imshow(
zdata,
aspect="auto",
origin="lower",
extent=[
np.min(k_prime.value),
np.max(k_prime.value),
np.min(inputs["theta"].value),
np.max(inputs["theta"].value),
],
interpolation=None,
cmap=plt.cm.Spectral,
)
ax = plt.gca()
# # adjust axes
ax.set_xscale("linear")
ax.set_xlabel(r"$kc/\omega_{pe}$", fontsize=fs)
ax.set_ylabel(r"$\theta$ [$deg.$]", fontsize=fs)
ax.tick_params(
which="both",
direction="in",
width=2,
labelsize=fs,
right=True,
top=True,
length=10,
)
# Add colorbar
divider = make_axes_locatable(ax)
cax = divider.append_axes("top", size="5%", pad=0.07)
cbar = plt.colorbar(
im, cax=cax, orientation="horizontal", ticks=None, fraction=0.05, pad=0.0,
)
cbar.ax.tick_params(
axis="x",
direction="in",
width=2,
length=10,
top=True,
bottom=False,
labelsize=fs,
pad=0.0,
labeltop=True,
labelbottom=False,
)
cbar.ax.xaxis.set_label_position("top")
cbar.set_label(r"$\omega/\omega_{pe}$", fontsize=fs, labelpad=8)
###Output
_____no_output_____
###Markdown
Reproduce Figure 1 from Bellan 2012[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856Figure 1 of [Bellan 2012][bellan2012] chooses parameters such that $\beta = 0.4$ and $\Lambda=0.4$. Below we define parameters to approximate Bellan's assumptions.
###Code
# define input parameters
inputs = {
"B": 400e-4 * u.T,
"ion": Particle("He+"),
"n_i": 6.358e19 * u.m ** -3,
"T_e": 20 * u.eV,
"T_i": 10 * u.eV,
"theta": np.linspace(0, 90) * u.deg,
"k": (2 * np.pi * u.rad) / (0.56547 * u.m),
}
# a few useful plasma parameters
params = {
"n_e": inputs["n_i"] * abs(inputs["ion"].charge_number),
"cs": pfp.cs_(inputs["T_e"], inputs["T_i"], inputs["ion"]),
"wci": pfp.wc_(inputs["B"], inputs["ion"]),
"va": pfp.va_(inputs["B"], inputs["n_i"], ion=inputs["ion"]),
}
params["beta"] = (params["cs"] / params["va"]).value ** 2
params["wpe"] = pfp.wp_(params["n_e"], "e-")
params["Lambda"] = (inputs["k"] * params["va"] / params["wci"]).value ** 2
(params["beta"], params["Lambda"])
# compute
omegas = two_fluid_dispersion_solution(**inputs)
# generate data for plots
plt_vals = {}
for mode, arr in omegas.items():
norm = (np.absolute(arr) / (inputs["k"] * params["va"])).value ** 2
plt_vals[mode] = {
"x": norm * np.sin(inputs["theta"].to(u.rad).value),
"y": norm * np.cos(inputs["theta"].to(u.rad).value),
}
fs = 14 # default font size
figwidth, figheight = plt.rcParams["figure.figsize"]
figheight = 1.6 * figheight
fig = plt.figure(figsize=[figwidth, figheight])
# Fast mode
plt.plot(
plt_vals["fast_mode"]["x"], plt_vals["fast_mode"]["y"], linewidth=2, label="Fast",
)
ax = plt.gca()
# adjust axes
ax.set_xlabel(r"$(\omega / k v_A)^2 \, \sin \theta$", fontsize=fs)
ax.set_ylabel(r"$(\omega / k v_A)^2 \, \cos \theta$", fontsize=fs)
ax.set_xlim(0.0, 1.5)
ax.set_ylim(0.0, 2.0)
for spine in ax.spines.values():
spine.set_linewidth(2)
ax.minorticks_on()
ax.tick_params(which="both", labelsize=fs, width=2)
ax.tick_params(which="major", length=10)
ax.tick_params(which="minor", length=5)
ax.xaxis.set_major_locator(MultipleLocator(0.5))
ax.xaxis.set_minor_locator(MultipleLocator(0.1))
ax.yaxis.set_major_locator(MultipleLocator(0.5))
ax.yaxis.set_minor_locator(MultipleLocator(0.1))
# Alfven mode
plt.plot(
plt_vals["alfven_mode"]["x"],
plt_vals["alfven_mode"]["y"],
linewidth=2,
label="Alfv$\`{e}$n",
)
# Acoustic mode
plt.plot(
plt_vals["acoustic_mode"]["x"],
plt_vals["acoustic_mode"]["y"],
linewidth=2,
label="Acoustic",
)
# annotations
plt.legend(fontsize=fs, loc="upper right")
###Output
_____no_output_____
###Markdown
Dispersion: A Full Two Fluid Solution[tfds]: ../../api/plasmapy.dispersion.analytical.two_fluid_.two_fluid.rst[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856[stringer1963]: https://doi.org/10.1088/0368-3281/5/2/304This notebook walks through the functionalty of the [two_fluid()][tfds] function. This function computes the wave frequencies for given wavenumbers and plasma parameters based on the analytical solution presented by [Bellan 2012][bellan2012] to the [Stringer 1963][stringer1963] two fluid dispersion relation. The two fluid dispersion equaiton assumes a uniform magnetic field, a zero D.C. electric field, and low-frequency waves $\omega / k c \ll 1$ which equates to$$ \left( \cos^2 \theta - Q \frac{\omega^2}{k^2 {v_A}^2} \right) \left[ \left( \cos^2 \theta - \frac{\omega^2}{k^2 {c_s}^2} \right) - Q \frac{\omega^2}{k^2 {v_A}^2} \left( 1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \right] \\ = \left(1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \frac{\omega^2}{{\omega_{ci}}^2} \cos^2 \theta$$where$$Q = 1 + k^2 c^2/{\omega_{pe}}^2$$$$\cos \theta = \frac{k_z}{k}$$$$\mathbf{B_o} = B_{o} \mathbf{\hat{z}}$$$\omega$ is the wave frequency, $k$ is the wavenumber, $v_A$ is the Alfvรฉn velocity, $c_s$ is the sound speed, $\omega_{ci}$ is the ion gyrofrequency, and $\omega_{pe}$ is the electron plasma frequency.The approach outlined in Section 5 of [Bellan 2012][bellan2012] produces exact roots to the above dispersion equation for all three modes (fast, acoustic, and Alfvรฉn) without having to make additional approximations. The following dispersion relation is what the [two_fluid()][tfds] function computes.$$ \frac{\omega}{\omega_{ci}} = \sqrt{ 2 \Lambda \sqrt{-\frac{P}{3}} \cos\left( \frac{1}{3} \cos^{-1}\left( \frac{3q}{2p} \sqrt{-\frac{3}{p}} \right) - \frac{2 \pi}{3}j \right) + \frac{\Lambda A}{3} }$$where $j = 0$ represents the fast mode, $j = 1$ represents the Alfvรฉn mode, and $j = 2$ represents the acoustic mode. Additionally,$$p = \frac{3B-A^2}{3} \; , \; q = \frac{9AB-2A^3-27C}{27}$$$$A = \frac{Q + Q^2 \beta + Q \alpha + \alpha \Lambda}{Q^2} \; , \; B = \alpha \frac{1 + 2 Q \beta + \Lambda \beta}{Q^2} \; , \; C = \frac{\alpha^2 \beta}{Q^2}$$$$\alpha = \cos^2 \theta \; , \; \beta = \left( \frac{c_s}{v_A}\right)^2 \; , \; \Lambda = \left( \frac{k v_{A}}{\omega_{ci}}\right)^2$$ Contents:1. [Wave Propagating at 45 Degrees](Wave-Propagating-at-45-Degrees)2. [Wave frequencies on the k-theta plane](Wave-frequencies-on-the-k-theta-plane)3. [Reproduce Figure 1 from Bellan 2012](Reproduce-Figure-1-from-Bellan-2012)
###Code
%matplotlib inline
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
from astropy.constants.si import c
from matplotlib import colors
from matplotlib.ticker import MultipleLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
from plasmapy.dispersion.analytical.two_fluid_ import two_fluid
from plasmapy.formulary import parameters as pfp
from plasmapy.particles import Particle
plt.rcParams["figure.figsize"] = [10.5, 0.56 * 10.5]
###Output
_____no_output_____
###Markdown
Wave Propagating at 45 DegreesBelow we define the required parameters to compute the wave frequencies.
###Code
# define input parameters
inputs = {
"k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m,
"theta": 45 * u.deg,
"n_i": 5 * u.cm ** -3,
"B": 8.3e-9 * u.T,
"T_e": 1.6e6 * u.K,
"T_i": 4.0e5 * u.K,
"ion": Particle("p+"),
}
# a few useful plasma parameters
params = {
"n_e": inputs["n_i"] * abs(inputs["ion"].charge_number),
"cs": pfp.ion_sound_speed(inputs["T_e"], inputs["T_i"], inputs["ion"],),
"va": pfp.Alfven_speed(inputs["B"], inputs["n_i"], ion=inputs["ion"],),
"wci": pfp.gyrofrequency(inputs["B"], inputs["ion"]),
}
params["lpe"] = pfp.inertial_length(params["n_e"], "e-")
params["wpe"] = pfp.plasma_frequency(params["n_e"], "e-")
###Output
_____no_output_____
###Markdown
The computed wave frequencies ($rad/s$) are returned in a dictionary with keys representing the wave modes and the values being an Astropy [Quantity](https://docs.astropy.org/en/stable/api/astropy.units.Quantity.htmlastropy.units.Quantity). Since our inputs had a scalar $\theta$ and a 1D array of $k$'s, the computed wave frequencies will be a 1D array of size equal to the size of the $k$ array.
###Code
# compute
omegas = two_fluid(**inputs)
(list(omegas.keys()), omegas["fast_mode"], omegas["fast_mode"].shape)
###Output
_____no_output_____
###Markdown
Let's plot the results of each wave mode.
###Code
fs = 14 # default font size
figwidth, figheight = plt.rcParams["figure.figsize"]
figheight = 1.6 * figheight
fig = plt.figure(figsize=[figwidth, figheight])
# normalize data
k_prime = inputs["k"] * params["lpe"]
# plot
plt.plot(
k_prime, np.real(omegas["fast_mode"] / params["wpe"]), "r.", ms=1, label="Fast",
)
ax = plt.gca()
ax.plot(
k_prime, np.real(omegas["alfven_mode"] / params["wpe"]), "b.", ms=1, label="Alfvรจn",
)
ax.plot(
k_prime,
np.real(omegas["acoustic_mode"] / params["wpe"]),
"g.",
ms=1,
label="Acoustic",
)
# adjust axes
ax.set_xlabel(r"$kc / \omega_{pe}$", fontsize=fs)
ax.set_ylabel(r"$Re(\omega / \omega_{pe})$", fontsize=fs)
ax.set_yscale("log")
ax.set_xscale("log")
ax.set_ylim(1e-6, 2e-2)
ax.tick_params(
which="both", direction="in", width=1, labelsize=fs, right=True, length=5,
)
# annotate
text = (
f"$v_A/c_s = {params['va'] / params['cs']:.1f} \qquad "
f"c/v_A = 10^{np.log10(c / params['va']):.0f} \qquad "
f"\\theta = {inputs['theta'].value:.0f}"
"^{\\circ}$"
)
ax.text(0.25, 0.95, text, transform=ax.transAxes, fontsize=18)
ax.legend(loc="upper left", markerscale=5, fontsize=fs)
###Output
_____no_output_____
###Markdown
Wave frequencies on the k-theta planeLet us now look at the distribution of $\omega$ on a $k$-$\theta$ plane.
###Code
# define input parameters
inputs = {
"k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m,
"theta": np.linspace(5, 85, 100) * u.deg,
"n_i": 5 * u.cm ** -3,
"B": 8.3e-9 * u.T,
"T_e": 1.6e6 * u.K,
"T_i": 4.0e5 * u.K,
"ion": Particle("p+"),
}
# a few useful plasma parameters
params = {
"n_e": inputs["n_i"] * abs(inputs["ion"].charge_number),
"cs": pfp.ion_sound_speed(inputs["T_e"], inputs["T_i"], inputs["ion"],),
"va": pfp.Alfven_speed(inputs["B"], inputs["n_i"], ion=inputs["ion"],),
"wci": pfp.gyrofrequency(inputs["B"], inputs["ion"]),
}
params["lpe"] = pfp.inertial_length(params["n_e"], "e-")
params["wpe"] = pfp.plasma_frequency(params["n_e"], "e-")
###Output
_____no_output_____
###Markdown
Since the $\theta$ and $k$ values are now 1-D arrays, the returned wave frequencies will be 2-D arrays with the first dimension matching the size of $k$ and the second dimension matching the size of $\theta$.
###Code
# compute
omegas = two_fluid(**inputs)
(
omegas["fast_mode"].shape,
omegas["fast_mode"].shape[0] == inputs["k"].size,
omegas["fast_mode"].shape[1] == inputs["theta"].size,
)
###Output
_____no_output_____
###Markdown
Let's plot (the fast mode)!
###Code
fs = 14 # default font size
figwidth, figheight = plt.rcParams["figure.figsize"]
figheight = 1.6 * figheight
fig = plt.figure(figsize=[figwidth, figheight])
# normalize data
k_prime = inputs["k"] * params["lpe"]
zdata = np.transpose(np.real(omegas["fast_mode"].value)) / params["wpe"].value
# plot
im = plt.imshow(
zdata,
aspect="auto",
origin="lower",
extent=[
np.min(k_prime.value),
np.max(k_prime.value),
np.min(inputs["theta"].value),
np.max(inputs["theta"].value),
],
interpolation=None,
cmap=plt.cm.Spectral,
)
ax = plt.gca()
# # adjust axes
ax.set_xscale("linear")
ax.set_xlabel(r"$kc/\omega_{pe}$", fontsize=fs)
ax.set_ylabel(r"$\theta$ [$deg.$]", fontsize=fs)
ax.tick_params(
which="both",
direction="in",
width=2,
labelsize=fs,
right=True,
top=True,
length=10,
)
# Add colorbar
divider = make_axes_locatable(ax)
cax = divider.append_axes("top", size="5%", pad=0.07)
cbar = plt.colorbar(
im, cax=cax, orientation="horizontal", ticks=None, fraction=0.05, pad=0.0,
)
cbar.ax.tick_params(
axis="x",
direction="in",
width=2,
length=10,
top=True,
bottom=False,
labelsize=fs,
pad=0.0,
labeltop=True,
labelbottom=False,
)
cbar.ax.xaxis.set_label_position("top")
cbar.set_label(r"$\omega/\omega_{pe}$", fontsize=fs, labelpad=8)
###Output
_____no_output_____
###Markdown
Reproduce Figure 1 from Bellan 2012[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856Figure 1 of [Bellan 2012][bellan2012] chooses parameters such that $\beta = 0.4$ and $\Lambda=0.4$. Below we define parameters to approximate Bellan's assumptions.
###Code
# define input parameters
inputs = {
"B": 400e-4 * u.T,
"ion": Particle("He+"),
"n_i": 6.358e19 * u.m ** -3,
"T_e": 20 * u.eV,
"T_i": 10 * u.eV,
"theta": np.linspace(0, 90) * u.deg,
"k": (2 * np.pi * u.rad) / (0.56547 * u.m),
}
# a few useful plasma parameters
params = {
"n_e": inputs["n_i"] * abs(inputs["ion"].charge_number),
"cs": pfp.cs_(inputs["T_e"], inputs["T_i"], inputs["ion"]),
"wci": pfp.wc_(inputs["B"], inputs["ion"]),
"va": pfp.va_(inputs["B"], inputs["n_i"], ion=inputs["ion"]),
}
params["beta"] = (params["cs"] / params["va"]).value ** 2
params["wpe"] = pfp.wp_(params["n_e"], "e-")
params["Lambda"] = (inputs["k"] * params["va"] / params["wci"]).value ** 2
(params["beta"], params["Lambda"])
# compute
omegas = two_fluid(**inputs)
# generate data for plots
plt_vals = {}
for mode, arr in omegas.items():
norm = (np.absolute(arr) / (inputs["k"] * params["va"])).value ** 2
plt_vals[mode] = {
"x": norm * np.sin(inputs["theta"].to(u.rad).value),
"y": norm * np.cos(inputs["theta"].to(u.rad).value),
}
fs = 14 # default font size
figwidth, figheight = plt.rcParams["figure.figsize"]
figheight = 1.6 * figheight
fig = plt.figure(figsize=[figwidth, figheight])
# Fast mode
plt.plot(
plt_vals["fast_mode"]["x"], plt_vals["fast_mode"]["y"], linewidth=2, label="Fast",
)
ax = plt.gca()
# adjust axes
ax.set_xlabel(r"$(\omega / k v_A)^2 \, \sin \theta$", fontsize=fs)
ax.set_ylabel(r"$(\omega / k v_A)^2 \, \cos \theta$", fontsize=fs)
ax.set_xlim(0.0, 1.5)
ax.set_ylim(0.0, 2.0)
for spine in ax.spines.values():
spine.set_linewidth(2)
ax.minorticks_on()
ax.tick_params(which="both", labelsize=fs, width=2)
ax.tick_params(which="major", length=10)
ax.tick_params(which="minor", length=5)
ax.xaxis.set_major_locator(MultipleLocator(0.5))
ax.xaxis.set_minor_locator(MultipleLocator(0.1))
ax.yaxis.set_major_locator(MultipleLocator(0.5))
ax.yaxis.set_minor_locator(MultipleLocator(0.1))
# Alfven mode
plt.plot(
plt_vals["alfven_mode"]["x"],
plt_vals["alfven_mode"]["y"],
linewidth=2,
label="Alfv$\`{e}$n",
)
# Acoustic mode
plt.plot(
plt_vals["acoustic_mode"]["x"],
plt_vals["acoustic_mode"]["y"],
linewidth=2,
label="Acoustic",
)
# annotations
plt.legend(fontsize=fs, loc="upper right")
###Output
_____no_output_____
###Markdown
Dispersion: A Full Two Fluid Solution[tfds]: ../../api/plasmapy.dispersion.analytical.two_fluid_.two_fluid.rst[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856[stringer1963]: https://doi.org/10.1088/0368-3281/5/2/304This notebook walks through the functionality of the [two_fluid()][tfds] function. This function computes the wave frequencies for given wavenumbers and plasma parameters based on the analytical solution presented by [Bellan 2012][bellan2012] to the [Stringer 1963][stringer1963] two fluid dispersion relation. The two fluid dispersion equation assumes a uniform magnetic field, a zero D.C. electric field, and low-frequency waves $\omega / k c \ll 1$ which equates to$$ \left( \cos^2 \theta - Q \frac{\omega^2}{k^2 {v_A}^2} \right) \left[ \left( \cos^2 \theta - \frac{\omega^2}{k^2 {c_s}^2} \right) - Q \frac{\omega^2}{k^2 {v_A}^2} \left( 1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \right] \\ = \left(1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \frac{\omega^2}{{\omega_{ci}}^2} \cos^2 \theta$$where$$Q = 1 + k^2 c^2/{\omega_{pe}}^2$$$$\cos \theta = \frac{k_z}{k}$$$$\mathbf{B_o} = B_{o} \mathbf{\hat{z}}$$$\omega$ is the wave frequency, $k$ is the wavenumber, $v_A$ is the Alfvén velocity, $c_s$ is the sound speed, $\omega_{ci}$ is the ion gyrofrequency, and $\omega_{pe}$ is the electron plasma frequency.The approach outlined in Section 5 of [Bellan 2012][bellan2012] produces exact roots to the above dispersion equation for all three modes (fast, acoustic, and Alfvén) without having to make additional approximations. The following dispersion relation is what the [two_fluid()][tfds] function computes.$$ \frac{\omega}{\omega_{ci}} = \sqrt{ 2 \Lambda \sqrt{-\frac{p}{3}} \cos\left( \frac{1}{3} \cos^{-1}\left( \frac{3q}{2p} \sqrt{-\frac{3}{p}} \right) - \frac{2 \pi}{3}j \right) + \frac{\Lambda A}{3} }$$where $j = 0$ represents the fast mode, $j = 1$ represents the Alfvén mode, and $j = 2$ represents the acoustic mode. Additionally,$$p = \frac{3B-A^2}{3} \; , \; q = \frac{9AB-2A^3-27C}{27}$$$$A = \frac{Q + Q^2 \beta + Q \alpha + \alpha \Lambda}{Q^2} \; , \; B = \alpha \frac{1 + 2 Q \beta + \Lambda \beta}{Q^2} \; , \; C = \frac{\alpha^2 \beta}{Q^2}$$$$\alpha = \cos^2 \theta \; , \; \beta = \left( \frac{c_s}{v_A}\right)^2 \; , \; \Lambda = \left( \frac{k v_{A}}{\omega_{ci}}\right)^2$$ Contents:1. [Wave Propagating at 45 Degrees](Wave-Propagating-at-45-Degrees)2. [Wave frequencies on the k-theta plane](Wave-frequencies-on-the-k-theta-plane)3. [Reproduce Figure 1 from Bellan 2012](Reproduce-Figure-1-from-Bellan-2012)
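A quick way to sanity-check this closed-form root is to evaluate it directly from the normalized quantities $\alpha$, $\beta$, $\Lambda$, and $Q$. The cell below is a minimal sketch of that calculation (it is not part of the original notebook); the function name `bellan_omega`, the default $Q = 1$, and the example values are illustrative assumptions only.
###Code
# Minimal sketch (illustrative, not original notebook code): evaluate the
# Bellan 2012 closed-form roots from the normalized parameters defined above.
import numpy as np


def bellan_omega(alpha, beta, Lambda, Q=1.0):
    """Return omega/omega_ci for j = 0 (fast), 1 (Alfven), 2 (acoustic)."""
    # coefficients of the cubic (Bellan 2012, Section 5)
    A = (Q + Q**2 * beta + Q * alpha + alpha * Lambda) / Q**2
    B = alpha * (1 + 2 * Q * beta + Lambda * beta) / Q**2
    C = alpha**2 * beta / Q**2
    p = (3 * B - A**2) / 3
    q = (9 * A * B - 2 * A**3 - 27 * C) / 27
    j = np.arange(3)  # fast, Alfven, acoustic branches
    # guard against tiny floating-point overshoot outside [-1, 1]
    arg = np.clip((3 * q / (2 * p)) * np.sqrt(-3 / p), -1.0, 1.0)
    omega_sq = (
        2 * Lambda * np.sqrt(-p / 3) * np.cos(np.arccos(arg) / 3 - 2 * np.pi * j / 3)
        + Lambda * A / 3
    )
    return dict(zip(("fast_mode", "alfven_mode", "acoustic_mode"), np.sqrt(omega_sq)))


# e.g. theta = 45 deg with beta = 0.4 and Lambda = 0.4 (cf. the Bellan figure below)
bellan_omega(alpha=np.cos(np.pi / 4) ** 2, beta=0.4, Lambda=0.4)
###Output
_____no_output_____
###Markdown
The remaining cells perform the same calculation with dimensional inputs through `two_fluid()`, which also evaluates the $Q = 1 + k^2 c^2 / {\omega_{pe}}^2$ factor from the physical parameters.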
###Code
%matplotlib inline
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
from astropy.constants.si import c
from matplotlib import colors
from matplotlib.ticker import MultipleLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
from plasmapy.dispersion.analytical.two_fluid_ import two_fluid
from plasmapy.formulary import parameters as pfp
from plasmapy.particles import Particle
plt.rcParams["figure.figsize"] = [10.5, 0.56 * 10.5]
###Output
_____no_output_____
###Markdown
Wave Propagating at 45 DegreesBelow we define the required parameters to compute the wave frequencies.
###Code
# define input parameters
inputs = {
"k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m,
"theta": 45 * u.deg,
"n_i": 5 * u.cm ** -3,
"B": 8.3e-9 * u.T,
"T_e": 1.6e6 * u.K,
"T_i": 4.0e5 * u.K,
"ion": Particle("p+"),
}
# a few useful plasma parameters
params = {
"n_e": inputs["n_i"] * abs(inputs["ion"].charge_number),
"cs": pfp.ion_sound_speed(
inputs["T_e"],
inputs["T_i"],
inputs["ion"],
),
"va": pfp.Alfven_speed(
inputs["B"],
inputs["n_i"],
ion=inputs["ion"],
),
"wci": pfp.gyrofrequency(inputs["B"], inputs["ion"]),
}
params["lpe"] = pfp.inertial_length(params["n_e"], "e-")
params["wpe"] = pfp.plasma_frequency(params["n_e"], "e-")
###Output
_____no_output_____
###Markdown
The computed wave frequencies ($rad/s$) are returned in a dictionary with keys representing the wave modes and the values being an Astropy [Quantity](https://docs.astropy.org/en/stable/api/astropy.units.Quantity.htmlastropy.units.Quantity). Since our inputs had a scalar $\theta$ and a 1D array of $k$'s, the computed wave frequencies will be a 1D array of size equal to the size of the $k$ array.
###Code
# compute
omegas = two_fluid(**inputs)
(list(omegas.keys()), omegas["fast_mode"], omegas["fast_mode"].shape)
###Output
_____no_output_____
###Markdown
Let's plot the results of each wave mode.
###Code
fs = 14 # default font size
figwidth, figheight = plt.rcParams["figure.figsize"]
figheight = 1.6 * figheight
fig = plt.figure(figsize=[figwidth, figheight])
# normalize data
k_prime = inputs["k"] * params["lpe"]
# plot
plt.plot(
k_prime,
np.real(omegas["fast_mode"] / params["wpe"]),
"r.",
ms=1,
label="Fast",
)
ax = plt.gca()
ax.plot(
k_prime,
np.real(omegas["alfven_mode"] / params["wpe"]),
"b.",
ms=1,
label="Alfvรจn",
)
ax.plot(
k_prime,
np.real(omegas["acoustic_mode"] / params["wpe"]),
"g.",
ms=1,
label="Acoustic",
)
# adjust axes
ax.set_xlabel(r"$kc / \omega_{pe}$", fontsize=fs)
ax.set_ylabel(r"$Re(\omega / \omega_{pe})$", fontsize=fs)
ax.set_yscale("log")
ax.set_xscale("log")
ax.set_ylim(1e-6, 2e-2)
ax.tick_params(
which="both",
direction="in",
width=1,
labelsize=fs,
right=True,
length=5,
)
# annotate
text = (
f"$v_A/c_s = {params['va'] / params['cs']:.1f} \qquad "
f"c/v_A = 10^{np.log10(c / params['va']):.0f} \qquad "
f"\\theta = {inputs['theta'].value:.0f}"
"^{\\circ}$"
)
ax.text(0.25, 0.95, text, transform=ax.transAxes, fontsize=18)
ax.legend(loc="upper left", markerscale=5, fontsize=fs)
###Output
_____no_output_____
###Markdown
Wave frequencies on the k-theta planeLet us now look at the distribution of $\omega$ on a $k$-$\theta$ plane.
###Code
# define input parameters
inputs = {
"k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m,
"theta": np.linspace(5, 85, 100) * u.deg,
"n_i": 5 * u.cm ** -3,
"B": 8.3e-9 * u.T,
"T_e": 1.6e6 * u.K,
"T_i": 4.0e5 * u.K,
"ion": Particle("p+"),
}
# a few useful plasma parameters
params = {
"n_e": inputs["n_i"] * abs(inputs["ion"].charge_number),
"cs": pfp.ion_sound_speed(
inputs["T_e"],
inputs["T_i"],
inputs["ion"],
),
"va": pfp.Alfven_speed(
inputs["B"],
inputs["n_i"],
ion=inputs["ion"],
),
"wci": pfp.gyrofrequency(inputs["B"], inputs["ion"]),
}
params["lpe"] = pfp.inertial_length(params["n_e"], "e-")
params["wpe"] = pfp.plasma_frequency(params["n_e"], "e-")
###Output
_____no_output_____
###Markdown
Since the $\theta$ and $k$ values are now 1-D arrays, the returned wave frequencies will be 2-D arrays with the first dimension matching the size of $k$ and the second dimension matching the size of $\theta$.
###Code
# compute
omegas = two_fluid(**inputs)
(
omegas["fast_mode"].shape,
omegas["fast_mode"].shape[0] == inputs["k"].size,
omegas["fast_mode"].shape[1] == inputs["theta"].size,
)
###Output
_____no_output_____
###Markdown
Let's plot (the fast mode)!
###Code
fs = 14 # default font size
figwidth, figheight = plt.rcParams["figure.figsize"]
figheight = 1.6 * figheight
fig = plt.figure(figsize=[figwidth, figheight])
# normalize data
k_prime = inputs["k"] * params["lpe"]
zdata = np.transpose(np.real(omegas["fast_mode"].value)) / params["wpe"].value
# plot
im = plt.imshow(
zdata,
aspect="auto",
origin="lower",
extent=[
np.min(k_prime.value),
np.max(k_prime.value),
np.min(inputs["theta"].value),
np.max(inputs["theta"].value),
],
interpolation=None,
cmap=plt.cm.Spectral,
)
ax = plt.gca()
# # adjust axes
ax.set_xscale("linear")
ax.set_xlabel(r"$kc/\omega_{pe}$", fontsize=fs)
ax.set_ylabel(r"$\theta$ [$deg.$]", fontsize=fs)
ax.tick_params(
which="both",
direction="in",
width=2,
labelsize=fs,
right=True,
top=True,
length=10,
)
# Add colorbar
divider = make_axes_locatable(ax)
cax = divider.append_axes("top", size="5%", pad=0.07)
cbar = plt.colorbar(
im,
cax=cax,
orientation="horizontal",
ticks=None,
fraction=0.05,
pad=0.0,
)
cbar.ax.tick_params(
axis="x",
direction="in",
width=2,
length=10,
top=True,
bottom=False,
labelsize=fs,
pad=0.0,
labeltop=True,
labelbottom=False,
)
cbar.ax.xaxis.set_label_position("top")
cbar.set_label(r"$\omega/\omega_{pe}$", fontsize=fs, labelpad=8)
###Output
_____no_output_____
###Markdown
Reproduce Figure 1 from Bellan 2012[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856Figure 1 of [Bellan 2012][bellan2012] chooses parameters such that $\beta = 0.4$ and $\Lambda=0.4$. Below we define parameters to approximate Bellan's assumptions.
###Code
# define input parameters
inputs = {
"B": 400e-4 * u.T,
"ion": Particle("He+"),
"n_i": 6.358e19 * u.m ** -3,
"T_e": 20 * u.eV,
"T_i": 10 * u.eV,
"theta": np.linspace(0, 90) * u.deg,
"k": (2 * np.pi * u.rad) / (0.56547 * u.m),
}
# a few useful plasma parameters
params = {
"n_e": inputs["n_i"] * abs(inputs["ion"].charge_number),
"cs": pfp.cs_(inputs["T_e"], inputs["T_i"], inputs["ion"]),
"wci": pfp.wc_(inputs["B"], inputs["ion"]),
"va": pfp.va_(inputs["B"], inputs["n_i"], ion=inputs["ion"]),
}
params["beta"] = (params["cs"] / params["va"]).value ** 2
params["wpe"] = pfp.wp_(params["n_e"], "e-")
params["Lambda"] = (inputs["k"] * params["va"] / params["wci"]).value ** 2
(params["beta"], params["Lambda"])
# compute
omegas = two_fluid(**inputs)
# generate data for plots
plt_vals = {}
for mode, arr in omegas.items():
norm = (np.absolute(arr) / (inputs["k"] * params["va"])).value ** 2
plt_vals[mode] = {
"x": norm * np.sin(inputs["theta"].to(u.rad).value),
"y": norm * np.cos(inputs["theta"].to(u.rad).value),
}
fs = 14 # default font size
figwidth, figheight = plt.rcParams["figure.figsize"]
figheight = 1.6 * figheight
fig = plt.figure(figsize=[figwidth, figheight])
# Fast mode
plt.plot(
plt_vals["fast_mode"]["x"],
plt_vals["fast_mode"]["y"],
linewidth=2,
label="Fast",
)
ax = plt.gca()
# adjust axes
ax.set_xlabel(r"$(\omega / k v_A)^2 \, \sin \theta$", fontsize=fs)
ax.set_ylabel(r"$(\omega / k v_A)^2 \, \cos \theta$", fontsize=fs)
ax.set_xlim(0.0, 1.5)
ax.set_ylim(0.0, 2.0)
for spine in ax.spines.values():
spine.set_linewidth(2)
ax.minorticks_on()
ax.tick_params(which="both", labelsize=fs, width=2)
ax.tick_params(which="major", length=10)
ax.tick_params(which="minor", length=5)
ax.xaxis.set_major_locator(MultipleLocator(0.5))
ax.xaxis.set_minor_locator(MultipleLocator(0.1))
ax.yaxis.set_major_locator(MultipleLocator(0.5))
ax.yaxis.set_minor_locator(MultipleLocator(0.1))
# Alfven mode
plt.plot(
plt_vals["alfven_mode"]["x"],
plt_vals["alfven_mode"]["y"],
linewidth=2,
label="Alfv$\`{e}$n",
)
# Acoustic mode
plt.plot(
plt_vals["acoustic_mode"]["x"],
plt_vals["acoustic_mode"]["y"],
linewidth=2,
label="Acoustic",
)
# annotations
plt.legend(fontsize=fs, loc="upper right")
###Output
_____no_output_____
###Markdown
Dispersion: A Full Two Fluid Solution[tfds]: ../../api/plasmapy.dispersion.analytical.two_fluid_.two_fluid.rst[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856[stringer1963]: https://doi.org/10.1088/0368-3281/5/2/304This notebook walks through the functionality of the [two_fluid()][tfds] function. This function computes the wave frequencies for given wavenumbers and plasma parameters based on the analytical solution presented by [Bellan 2012][bellan2012] to the [Stringer 1963][stringer1963] two fluid dispersion relation. The two fluid dispersion equation assumes a uniform magnetic field, a zero D.C. electric field, and low-frequency waves $\omega / k c \ll 1$ which equates to$$ \left( \cos^2 \theta - Q \frac{\omega^2}{k^2 {v_A}^2} \right) \left[ \left( \cos^2 \theta - \frac{\omega^2}{k^2 {c_s}^2} \right) - Q \frac{\omega^2}{k^2 {v_A}^2} \left( 1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \right] \\ = \left(1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \frac{\omega^2}{{\omega_{ci}}^2} \cos^2 \theta$$where$$Q = 1 + k^2 c^2/{\omega_{pe}}^2$$$$\cos \theta = \frac{k_z}{k}$$$$\mathbf{B_o} = B_{o} \mathbf{\hat{z}}$$$\omega$ is the wave frequency, $k$ is the wavenumber, $v_A$ is the Alfvén velocity, $c_s$ is the sound speed, $\omega_{ci}$ is the ion gyrofrequency, and $\omega_{pe}$ is the electron plasma frequency.The approach outlined in Section 5 of [Bellan 2012][bellan2012] produces exact roots to the above dispersion equation for all three modes (fast, acoustic, and Alfvén) without having to make additional approximations. The following dispersion relation is what the [two_fluid()][tfds] function computes.$$ \frac{\omega}{\omega_{ci}} = \sqrt{ 2 \Lambda \sqrt{-\frac{p}{3}} \cos\left( \frac{1}{3} \cos^{-1}\left( \frac{3q}{2p} \sqrt{-\frac{3}{p}} \right) - \frac{2 \pi}{3}j \right) + \frac{\Lambda A}{3} }$$where $j = 0$ represents the fast mode, $j = 1$ represents the Alfvén mode, and $j = 2$ represents the acoustic mode. Additionally,$$p = \frac{3B-A^2}{3} \; , \; q = \frac{9AB-2A^3-27C}{27}$$$$A = \frac{Q + Q^2 \beta + Q \alpha + \alpha \Lambda}{Q^2} \; , \; B = \alpha \frac{1 + 2 Q \beta + \Lambda \beta}{Q^2} \; , \; C = \frac{\alpha^2 \beta}{Q^2}$$$$\alpha = \cos^2 \theta \; , \; \beta = \left( \frac{c_s}{v_A}\right)^2 \; , \; \Lambda = \left( \frac{k v_{A}}{\omega_{ci}}\right)^2$$ Contents:1. [Wave Propagating at 45 Degrees](Wave-Propagating-at-45-Degrees)2. [Wave frequencies on the k-theta plane](Wave-frequencies-on-the-k-theta-plane)3. [Reproduce Figure 1 from Bellan 2012](Reproduce-Figure-1-from-Bellan-2012)
###Code
%matplotlib inline
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
from astropy.constants.si import c
from matplotlib.ticker import MultipleLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
from plasmapy.dispersion.analytical.two_fluid_ import two_fluid
from plasmapy.formulary import speeds
from plasmapy.formulary.frequencies import gyrofrequency, plasma_frequency, wc_, wp_
from plasmapy.formulary.lengths import inertial_length
from plasmapy.particles import Particle
plt.rcParams["figure.figsize"] = [10.5, 0.56 * 10.5]
###Output
_____no_output_____
###Markdown
Wave Propagating at 45 DegreesBelow we define the required parameters to compute the wave frequencies.
###Code
# define input parameters
inputs = {
"k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m,
"theta": 45 * u.deg,
"n_i": 5 * u.cm ** -3,
"B": 8.3e-9 * u.T,
"T_e": 1.6e6 * u.K,
"T_i": 4.0e5 * u.K,
"ion": Particle("p+"),
}
# a few useful plasma parameters
params = {
"n_e": inputs["n_i"] * abs(inputs["ion"].charge_number),
"cs": speeds.ion_sound_speed(
inputs["T_e"],
inputs["T_i"],
inputs["ion"],
),
"va": speeds.Alfven_speed(
inputs["B"],
inputs["n_i"],
ion=inputs["ion"],
),
"wci": gyrofrequency(inputs["B"], inputs["ion"]),
}
params["lpe"] = inertial_length(params["n_e"], "e-")
params["wpe"] = plasma_frequency(params["n_e"], "e-")
###Output
_____no_output_____
###Markdown
The computed wave frequencies ($rad/s$) are returned in a dictionary with keys representing the wave modes and the values being an Astropy [Quantity](https://docs.astropy.org/en/stable/api/astropy.units.Quantity.htmlastropy.units.Quantity). Since our inputs had a scalar $\theta$ and a 1D array of $k$'s, the computed wave frequencies will be a 1D array of size equal to the size of the $k$ array.
###Code
# compute
omegas = two_fluid(**inputs)
(list(omegas.keys()), omegas["fast_mode"], omegas["fast_mode"].shape)
###Output
_____no_output_____
###Markdown
Let's plot the results of each wave mode.
###Code
fs = 14 # default font size
figwidth, figheight = plt.rcParams["figure.figsize"]
figheight = 1.6 * figheight
fig = plt.figure(figsize=[figwidth, figheight])
# normalize data
k_prime = inputs["k"] * params["lpe"]
# plot
plt.plot(
k_prime,
np.real(omegas["fast_mode"] / params["wpe"]),
"r.",
ms=1,
label="Fast",
)
ax = plt.gca()
ax.plot(
k_prime,
np.real(omegas["alfven_mode"] / params["wpe"]),
"b.",
ms=1,
label="Alfvรจn",
)
ax.plot(
k_prime,
np.real(omegas["acoustic_mode"] / params["wpe"]),
"g.",
ms=1,
label="Acoustic",
)
# adjust axes
ax.set_xlabel(r"$kc / \omega_{pe}$", fontsize=fs)
ax.set_ylabel(r"$Re(\omega / \omega_{pe})$", fontsize=fs)
ax.set_yscale("log")
ax.set_xscale("log")
ax.set_ylim(1e-6, 2e-2)
ax.tick_params(
which="both",
direction="in",
width=1,
labelsize=fs,
right=True,
length=5,
)
# annotate
text = (
f"$v_A/c_s = {params['va'] / params['cs']:.1f} \qquad "
f"c/v_A = 10^{np.log10(c / params['va']):.0f} \qquad "
f"\\theta = {inputs['theta'].value:.0f}"
"^{\\circ}$"
)
ax.text(0.25, 0.95, text, transform=ax.transAxes, fontsize=18)
ax.legend(loc="upper left", markerscale=5, fontsize=fs)
###Output
_____no_output_____
###Markdown
Wave frequencies on the k-theta planeLet us now look at the distribution of $\omega$ on a $k$-$\theta$ plane.
###Code
# define input parameters
inputs = {
"k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m,
"theta": np.linspace(5, 85, 100) * u.deg,
"n_i": 5 * u.cm ** -3,
"B": 8.3e-9 * u.T,
"T_e": 1.6e6 * u.K,
"T_i": 4.0e5 * u.K,
"ion": Particle("p+"),
}
# a few useful plasma parameters
params = {
"n_e": inputs["n_i"] * abs(inputs["ion"].charge_number),
"cs": speeds.ion_sound_speed(
inputs["T_e"],
inputs["T_i"],
inputs["ion"],
),
"va": speeds.Alfven_speed(
inputs["B"],
inputs["n_i"],
ion=inputs["ion"],
),
"wci": gyrofrequency(inputs["B"], inputs["ion"]),
}
params["lpe"] = inertial_length(params["n_e"], "e-")
params["wpe"] = plasma_frequency(params["n_e"], "e-")
###Output
_____no_output_____
###Markdown
Since the $\theta$ and $k$ values are now 1-D arrays, the returned wave frequencies will be 2-D arrays with the first dimension matching the size of $k$ and the second dimension matching the size of $\theta$.
###Code
# compute
omegas = two_fluid(**inputs)
(
omegas["fast_mode"].shape,
omegas["fast_mode"].shape[0] == inputs["k"].size,
omegas["fast_mode"].shape[1] == inputs["theta"].size,
)
###Output
_____no_output_____
###Markdown
Let's plot (the fast mode)!
###Code
fs = 14 # default font size
figwidth, figheight = plt.rcParams["figure.figsize"]
figheight = 1.6 * figheight
fig = plt.figure(figsize=[figwidth, figheight])
# normalize data
k_prime = inputs["k"] * params["lpe"]
zdata = np.transpose(np.real(omegas["fast_mode"].value)) / params["wpe"].value
# plot
im = plt.imshow(
zdata,
aspect="auto",
origin="lower",
extent=[
np.min(k_prime.value),
np.max(k_prime.value),
np.min(inputs["theta"].value),
np.max(inputs["theta"].value),
],
interpolation=None,
cmap=plt.cm.Spectral,
)
ax = plt.gca()
# # adjust axes
ax.set_xscale("linear")
ax.set_xlabel(r"$kc/\omega_{pe}$", fontsize=fs)
ax.set_ylabel(r"$\theta$ [$deg.$]", fontsize=fs)
ax.tick_params(
which="both",
direction="in",
width=2,
labelsize=fs,
right=True,
top=True,
length=10,
)
# Add colorbar
divider = make_axes_locatable(ax)
cax = divider.append_axes("top", size="5%", pad=0.07)
cbar = plt.colorbar(
im,
cax=cax,
orientation="horizontal",
ticks=None,
fraction=0.05,
pad=0.0,
)
cbar.ax.tick_params(
axis="x",
direction="in",
width=2,
length=10,
top=True,
bottom=False,
labelsize=fs,
pad=0.0,
labeltop=True,
labelbottom=False,
)
cbar.ax.xaxis.set_label_position("top")
cbar.set_label(r"$\omega/\omega_{pe}$", fontsize=fs, labelpad=8)
###Output
_____no_output_____
###Markdown
Reproduce Figure 1 from Bellan 2012[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856Figure 1 of [Bellan 2012][bellan2012] chooses parameters such that $\beta = 0.4$ and $\Lambda=0.4$. Below we define parameters to approximate Bellan's assumptions.
###Code
# define input parameters
inputs = {
"B": 400e-4 * u.T,
"ion": Particle("He+"),
"n_i": 6.358e19 * u.m ** -3,
"T_e": 20 * u.eV,
"T_i": 10 * u.eV,
"theta": np.linspace(0, 90) * u.deg,
"k": (2 * np.pi * u.rad) / (0.56547 * u.m),
}
# a few useful plasma parameters
params = {
"n_e": inputs["n_i"] * abs(inputs["ion"].charge_number),
"cs": speeds.cs_(inputs["T_e"], inputs["T_i"], inputs["ion"]),
"wci": wc_(inputs["B"], inputs["ion"]),
"va": speeds.va_(inputs["B"], inputs["n_i"], ion=inputs["ion"]),
}
params["beta"] = (params["cs"] / params["va"]).value ** 2
params["wpe"] = wp_(params["n_e"], "e-")
params["Lambda"] = (inputs["k"] * params["va"] / params["wci"]).value ** 2
(params["beta"], params["Lambda"])
# compute
omegas = two_fluid(**inputs)
# generate data for plots
plt_vals = {}
for mode, arr in omegas.items():
norm = (np.absolute(arr) / (inputs["k"] * params["va"])).value ** 2
plt_vals[mode] = {
"x": norm * np.sin(inputs["theta"].to(u.rad).value),
"y": norm * np.cos(inputs["theta"].to(u.rad).value),
}
fs = 14 # default font size
figwidth, figheight = plt.rcParams["figure.figsize"]
figheight = 1.6 * figheight
fig = plt.figure(figsize=[figwidth, figheight])
# Fast mode
plt.plot(
plt_vals["fast_mode"]["x"],
plt_vals["fast_mode"]["y"],
linewidth=2,
label="Fast",
)
ax = plt.gca()
# adjust axes
ax.set_xlabel(r"$(\omega / k v_A)^2 \, \sin \theta$", fontsize=fs)
ax.set_ylabel(r"$(\omega / k v_A)^2 \, \cos \theta$", fontsize=fs)
ax.set_xlim(0.0, 1.5)
ax.set_ylim(0.0, 2.0)
for spine in ax.spines.values():
spine.set_linewidth(2)
ax.minorticks_on()
ax.tick_params(which="both", labelsize=fs, width=2)
ax.tick_params(which="major", length=10)
ax.tick_params(which="minor", length=5)
ax.xaxis.set_major_locator(MultipleLocator(0.5))
ax.xaxis.set_minor_locator(MultipleLocator(0.1))
ax.yaxis.set_major_locator(MultipleLocator(0.5))
ax.yaxis.set_minor_locator(MultipleLocator(0.1))
# Alfven mode
plt.plot(
plt_vals["alfven_mode"]["x"],
plt_vals["alfven_mode"]["y"],
linewidth=2,
label="Alfv$\`{e}$n",
)
# Acoustic mode
plt.plot(
plt_vals["acoustic_mode"]["x"],
plt_vals["acoustic_mode"]["y"],
linewidth=2,
label="Acoustic",
)
# annotations
plt.legend(fontsize=fs, loc="upper right")
###Output
_____no_output_____
###Markdown
Dispersion: A Full Two Fluid Solution[tfds]: ../../api/plasmapy.dispersion.analytical.two_fluid_.two_fluid.rst[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856[stringer1963]: https://doi.org/10.1088/0368-3281/5/2/304This notebook walks through the functionality of the [two_fluid()][tfds] function. This function computes the wave frequencies for given wavenumbers and plasma parameters based on the analytical solution presented by [Bellan 2012][bellan2012] to the [Stringer 1963][stringer1963] two fluid dispersion relation. The two fluid dispersion equation assumes a uniform magnetic field, a zero D.C. electric field, and low-frequency waves $\omega / k c \ll 1$ which equates to$$ \left( \cos^2 \theta - Q \frac{\omega^2}{k^2 {v_A}^2} \right) \left[ \left( \cos^2 \theta - \frac{\omega^2}{k^2 {c_s}^2} \right) - Q \frac{\omega^2}{k^2 {v_A}^2} \left( 1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \right] \\ = \left(1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \frac{\omega^2}{{\omega_{ci}}^2} \cos^2 \theta$$where$$Q = 1 + k^2 c^2/{\omega_{pe}}^2$$$$\cos \theta = \frac{k_z}{k}$$$$\mathbf{B_o} = B_{o} \mathbf{\hat{z}}$$$\omega$ is the wave frequency, $k$ is the wavenumber, $v_A$ is the Alfvén velocity, $c_s$ is the sound speed, $\omega_{ci}$ is the ion gyrofrequency, and $\omega_{pe}$ is the electron plasma frequency.The approach outlined in Section 5 of [Bellan 2012][bellan2012] produces exact roots to the above dispersion equation for all three modes (fast, acoustic, and Alfvén) without having to make additional approximations. The following dispersion relation is what the [two_fluid()][tfds] function computes.$$ \frac{\omega}{\omega_{ci}} = \sqrt{ 2 \Lambda \sqrt{-\frac{p}{3}} \cos\left( \frac{1}{3} \cos^{-1}\left( \frac{3q}{2p} \sqrt{-\frac{3}{p}} \right) - \frac{2 \pi}{3}j \right) + \frac{\Lambda A}{3} }$$where $j = 0$ represents the fast mode, $j = 1$ represents the Alfvén mode, and $j = 2$ represents the acoustic mode. Additionally,$$p = \frac{3B-A^2}{3} \; , \; q = \frac{9AB-2A^3-27C}{27}$$$$A = \frac{Q + Q^2 \beta + Q \alpha + \alpha \Lambda}{Q^2} \; , \; B = \alpha \frac{1 + 2 Q \beta + \Lambda \beta}{Q^2} \; , \; C = \frac{\alpha^2 \beta}{Q^2}$$$$\alpha = \cos^2 \theta \; , \; \beta = \left( \frac{c_s}{v_A}\right)^2 \; , \; \Lambda = \left( \frac{k v_{A}}{\omega_{ci}}\right)^2$$ Contents:1. [Wave Propagating at 45 Degrees](Wave-Propagating-at-45-Degrees)2. [Wave frequencies on the k-theta plane](Wave-frequencies-on-the-k-theta-plane)3. [Reproduce Figure 1 from Bellan 2012](Reproduce-Figure-1-from-Bellan-2012)
###Code
%matplotlib inline
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
from astropy.constants.si import c
from matplotlib.ticker import MultipleLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
from plasmapy.dispersion.analytical.two_fluid_ import two_fluid
from plasmapy.formulary import speeds
from plasmapy.formulary.frequencies import gyrofrequency, plasma_frequency, wc_, wp_
from plasmapy.formulary.lengths import inertial_length
from plasmapy.particles import Particle
plt.rcParams["figure.figsize"] = [10.5, 0.56 * 10.5]
###Output
_____no_output_____
###Markdown
Wave Propagating at 45 DegreesBelow we define the required parameters to compute the wave frequencies.
###Code
# define input parameters
inputs = {
"k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m,
"theta": 45 * u.deg,
"n_i": 5 * u.cm ** -3,
"B": 8.3e-9 * u.T,
"T_e": 1.6e6 * u.K,
"T_i": 4.0e5 * u.K,
"ion": Particle("p+"),
}
# a few useful plasma parameters
params = {
"n_e": inputs["n_i"] * abs(inputs["ion"].charge_number),
"cs": speeds.ion_sound_speed(
inputs["T_e"],
inputs["T_i"],
inputs["ion"],
),
"va": speeds.Alfven_speed(
inputs["B"],
inputs["n_i"],
ion=inputs["ion"],
),
"wci": gyrofrequency(inputs["B"], inputs["ion"]),
}
params["lpe"] = inertial_length(params["n_e"], "e-")
params["wpe"] = plasma_frequency(params["n_e"], "e-")
###Output
_____no_output_____
###Markdown
The computed wave frequencies ($rad/s$) are returned in a dictionary with keys representing the wave modes and the values being an Astropy [Quantity](https://docs.astropy.org/en/stable/api/astropy.units.Quantity.htmlastropy.units.Quantity). Since our inputs had a scalar $\theta$ and a 1D array of $k$'s, the computed wave frequencies will be a 1D array of size equal to the size of the $k$ array.
###Code
# compute
omegas = two_fluid(**inputs)
(list(omegas.keys()), omegas["fast_mode"], omegas["fast_mode"].shape)
###Output
_____no_output_____
###Markdown
Let's plot the results of each wave mode.
###Code
fs = 14 # default font size
figwidth, figheight = plt.rcParams["figure.figsize"]
figheight = 1.6 * figheight
fig = plt.figure(figsize=[figwidth, figheight])
# normalize data
k_prime = inputs["k"] * params["lpe"]
# plot
plt.plot(
k_prime,
np.real(omegas["fast_mode"] / params["wpe"]),
"r.",
ms=1,
label="Fast",
)
ax = plt.gca()
ax.plot(
k_prime,
np.real(omegas["alfven_mode"] / params["wpe"]),
"b.",
ms=1,
label="Alfvรจn",
)
ax.plot(
k_prime,
np.real(omegas["acoustic_mode"] / params["wpe"]),
"g.",
ms=1,
label="Acoustic",
)
# adjust axes
ax.set_xlabel(r"$kc / \omega_{pe}$", fontsize=fs)
ax.set_ylabel(r"$Re(\omega / \omega_{pe})$", fontsize=fs)
ax.set_yscale("log")
ax.set_xscale("log")
ax.set_ylim(1e-6, 2e-2)
ax.tick_params(
which="both",
direction="in",
width=1,
labelsize=fs,
right=True,
length=5,
)
# annotate
text = (
f"$v_A/c_s = {params['va'] / params['cs']:.1f} \qquad "
f"c/v_A = 10^{np.log10(c / params['va']):.0f} \qquad "
f"\\theta = {inputs['theta'].value:.0f}"
"^{\\circ}$"
)
ax.text(0.25, 0.95, text, transform=ax.transAxes, fontsize=18)
ax.legend(loc="upper left", markerscale=5, fontsize=fs)
###Output
_____no_output_____
###Markdown
Wave frequencies on the k-theta planeLet us now look at the distribution of $\omega$ on a $k$-$\theta$ plane.
###Code
# define input parameters
inputs = {
"k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m,
"theta": np.linspace(5, 85, 100) * u.deg,
"n_i": 5 * u.cm ** -3,
"B": 8.3e-9 * u.T,
"T_e": 1.6e6 * u.K,
"T_i": 4.0e5 * u.K,
"ion": Particle("p+"),
}
# a few useful plasma parameters
params = {
"n_e": inputs["n_i"] * abs(inputs["ion"].charge_number),
"cs": speeds.ion_sound_speed(
inputs["T_e"],
inputs["T_i"],
inputs["ion"],
),
"va": speeds.Alfven_speed(
inputs["B"],
inputs["n_i"],
ion=inputs["ion"],
),
"wci": gyrofrequency(inputs["B"], inputs["ion"]),
}
params["lpe"] = inertial_length(params["n_e"], "e-")
params["wpe"] = plasma_frequency(params["n_e"], "e-")
###Output
_____no_output_____
###Markdown
Since the $\theta$ and $k$ values are now 1-D arrays, the returned wave frequencies will be 2-D arrays with the first dimension matching the size of $k$ and the second dimension matching the size of $\theta$.
###Code
# compute
omegas = two_fluid(**inputs)
(
omegas["fast_mode"].shape,
omegas["fast_mode"].shape[0] == inputs["k"].size,
omegas["fast_mode"].shape[1] == inputs["theta"].size,
)
###Output
_____no_output_____
###Markdown
Let's plot (the fast mode)!
###Code
fs = 14 # default font size
figwidth, figheight = plt.rcParams["figure.figsize"]
figheight = 1.6 * figheight
fig = plt.figure(figsize=[figwidth, figheight])
# normalize data
k_prime = inputs["k"] * params["lpe"]
zdata = np.transpose(np.real(omegas["fast_mode"].value)) / params["wpe"].value
# plot
im = plt.imshow(
zdata,
aspect="auto",
origin="lower",
extent=[
np.min(k_prime.value),
np.max(k_prime.value),
np.min(inputs["theta"].value),
np.max(inputs["theta"].value),
],
interpolation=None,
cmap=plt.cm.Spectral,
)
ax = plt.gca()
# # adjust axes
ax.set_xscale("linear")
ax.set_xlabel(r"$kc/\omega_{pe}$", fontsize=fs)
ax.set_ylabel(r"$\theta$ [$deg.$]", fontsize=fs)
ax.tick_params(
which="both",
direction="in",
width=2,
labelsize=fs,
right=True,
top=True,
length=10,
)
# Add colorbar
divider = make_axes_locatable(ax)
cax = divider.append_axes("top", size="5%", pad=0.07)
cbar = plt.colorbar(
im,
cax=cax,
orientation="horizontal",
ticks=None,
fraction=0.05,
pad=0.0,
)
cbar.ax.tick_params(
axis="x",
direction="in",
width=2,
length=10,
top=True,
bottom=False,
labelsize=fs,
pad=0.0,
labeltop=True,
labelbottom=False,
)
cbar.ax.xaxis.set_label_position("top")
cbar.set_label(r"$\omega/\omega_{pe}$", fontsize=fs, labelpad=8)
###Output
_____no_output_____
###Markdown
Reproduce Figure 1 from Bellan 2012[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856Figure 1 of [Bellan 2012][bellan2012] chooses parameters such that $\beta = 0.4$ and $\Lambda=0.4$. Below we define parameters to approximate Bellan's assumptions.
###Code
# define input parameters
inputs = {
"B": 400e-4 * u.T,
"ion": Particle("He+"),
"n_i": 6.358e19 * u.m ** -3,
"T_e": 20 * u.eV,
"T_i": 10 * u.eV,
"theta": np.linspace(0, 90) * u.deg,
"k": (2 * np.pi * u.rad) / (0.56547 * u.m),
}
# a few useful plasma parameters
params = {
"n_e": inputs["n_i"] * abs(inputs["ion"].charge_number),
"cs": speeds.cs_(inputs["T_e"], inputs["T_i"], inputs["ion"]),
"wci": wc_(inputs["B"], inputs["ion"]),
"va": speeds.va_(inputs["B"], inputs["n_i"], ion=inputs["ion"]),
}
params["beta"] = (params["cs"] / params["va"]).value ** 2
params["wpe"] = wp_(params["n_e"], "e-")
params["Lambda"] = (inputs["k"] * params["va"] / params["wci"]).value ** 2
(params["beta"], params["Lambda"])
# compute
omegas = two_fluid(**inputs)
# generate data for plots
plt_vals = {}
for mode, arr in omegas.items():
norm = (np.absolute(arr) / (inputs["k"] * params["va"])).value ** 2
plt_vals[mode] = {
"x": norm * np.sin(inputs["theta"].to(u.rad).value),
"y": norm * np.cos(inputs["theta"].to(u.rad).value),
}
fs = 14 # default font size
figwidth, figheight = plt.rcParams["figure.figsize"]
figheight = 1.6 * figheight
fig = plt.figure(figsize=[figwidth, figheight])
# Fast mode
plt.plot(
plt_vals["fast_mode"]["x"],
plt_vals["fast_mode"]["y"],
linewidth=2,
label="Fast",
)
ax = plt.gca()
# adjust axes
ax.set_xlabel(r"$(\omega / k v_A)^2 \, \sin \theta$", fontsize=fs)
ax.set_ylabel(r"$(\omega / k v_A)^2 \, \cos \theta$", fontsize=fs)
ax.set_xlim(0.0, 1.5)
ax.set_ylim(0.0, 2.0)
for spine in ax.spines.values():
spine.set_linewidth(2)
ax.minorticks_on()
ax.tick_params(which="both", labelsize=fs, width=2)
ax.tick_params(which="major", length=10)
ax.tick_params(which="minor", length=5)
ax.xaxis.set_major_locator(MultipleLocator(0.5))
ax.xaxis.set_minor_locator(MultipleLocator(0.1))
ax.yaxis.set_major_locator(MultipleLocator(0.5))
ax.yaxis.set_minor_locator(MultipleLocator(0.1))
# Alfven mode
plt.plot(
plt_vals["alfven_mode"]["x"],
plt_vals["alfven_mode"]["y"],
linewidth=2,
label="Alfv$\`{e}$n",
)
# Acoustic mode
plt.plot(
plt_vals["acoustic_mode"]["x"],
plt_vals["acoustic_mode"]["y"],
linewidth=2,
label="Acoustic",
)
# annotations
plt.legend(fontsize=fs, loc="upper right")
###Output
_____no_output_____
###Markdown
Dispersion: A Full Two Fluid Solution[tfds]: ../../api/plasmapy.dispersion.two_fluid_dispersion.two_fluid_dispersion_solution.rst[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856[stringer1963]: https://doi.org/10.1088/0368-3281/5/2/304This notebook walks through the functionality of the [two_fluid_dispersion_solution()][tfds] function. This function computes the wave frequencies for given wavenumbers and plasma parameters based on the analytical solution presented by [Bellan 2012][bellan2012] to the [Stringer 1963][stringer1963] two fluid dispersion relation. The two fluid dispersion equation assumes a uniform magnetic field, a zero D.C. electric field, and low-frequency waves $\omega / k c \ll 1$ which equates to$$ \left( \cos^2 \theta - Q \frac{\omega^2}{k^2 {v_A}^2} \right) \left[ \left( \cos^2 \theta - \frac{\omega^2}{k^2 {c_s}^2} \right) - Q \frac{\omega^2}{k^2 {v_A}^2} \left( 1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \right] \\ = \left(1 - \frac{\omega^2}{k^2 {c_s}^2} \right) \frac{\omega^2}{{\omega_{ci}}^2} \cos^2 \theta$$where$$Q = 1 + k^2 c^2/{\omega_{pe}}^2$$$$\cos \theta = \frac{k_z}{k}$$$$\mathbf{B_o} = B_{o} \mathbf{\hat{z}}$$$\omega$ is the wave frequency, $k$ is the wavenumber, $v_A$ is the Alfvén velocity, $c_s$ is the sound speed, $\omega_{ci}$ is the ion gyrofrequency, and $\omega_{pe}$ is the electron plasma frequency.The approach outlined in Section 5 of [Bellan 2012][bellan2012] produces exact roots to the above dispersion equation for all three modes (fast, acoustic, and Alfvén) without having to make additional approximations. The following dispersion relation is what the [two_fluid_dispersion_solution()][tfds] function computes.$$ \frac{\omega}{\omega_{ci}} = \sqrt{ 2 \Lambda \sqrt{-\frac{p}{3}} \cos\left( \frac{1}{3} \cos^{-1}\left( \frac{3q}{2p} \sqrt{-\frac{3}{p}} \right) - \frac{2 \pi}{3}j \right) + \frac{\Lambda A}{3} }$$where $j = 0$ represents the fast mode, $j = 1$ represents the Alfvén mode, and $j = 2$ represents the acoustic mode. Additionally,$$p = \frac{3B-A^2}{3} \; , \; q = \frac{9AB-2A^3-27C}{27}$$$$A = \frac{Q + Q^2 \beta + Q \alpha + \alpha \Lambda}{Q^2} \; , \; B = \alpha \frac{1 + 2 Q \beta + \Lambda \beta}{Q^2} \; , \; C = \frac{\alpha^2 \beta}{Q^2}$$$$\alpha = \cos^2 \theta \; , \; \beta = \left( \frac{c_s}{v_A}\right)^2 \; , \; \Lambda = \left( \frac{k v_{A}}{\omega_{ci}}\right)^2$$ Contents:1. [Wave Propagating at 45 Degrees](Wave-Propagating-at-45-Degrees)2. [Wave frequencies on the k-theta plane](Wave-frequencies-on-the-k-theta-plane)3. [Reproduce Figure 1 from Bellan 2012](Reproduce-Figure-1-from-Bellan-2012)
###Code
%matplotlib inline
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
from astropy.constants.si import c
from matplotlib import colors
from matplotlib.ticker import MultipleLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
from plasmapy.dispersion.two_fluid_dispersion import two_fluid_dispersion_solution
from plasmapy.formulary import parameters as pfp
from plasmapy.particles import Particle
plt.rcParams["figure.figsize"] = [10.5, 0.56 * 10.5]
###Output
_____no_output_____
###Markdown
Wave Propagating at 45 DegreesBelow we define the required parameters to compute the wave frequencies.
###Code
# define input parameters
inputs = {
"k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m,
"theta": 45 * u.deg,
"n_i": 5 * u.cm ** -3,
"B": 8.3e-9 * u.T,
"T_e": 1.6e6 * u.K,
"T_i": 4.0e5 * u.K,
"ion": Particle("p+"),
}
# a few useful plasma parameters
params = {
"n_e": inputs["n_i"] * abs(inputs["ion"].integer_charge),
"cs": pfp.ion_sound_speed(inputs["T_e"], inputs["T_i"], inputs["ion"],),
"va": pfp.Alfven_speed(inputs["B"], inputs["n_i"], ion=inputs["ion"],),
"wci": pfp.gyrofrequency(inputs["B"], inputs["ion"]),
}
params["lpe"] = pfp.inertial_length(params["n_e"], "e-")
params["wpe"] = pfp.plasma_frequency(params["n_e"], "e-")
###Output
_____no_output_____
###Markdown
The computed wave frequencies ($rad/s$) are returned in a dictionary with keys representing the wave modes and the values being an Astropy [Quantity](https://docs.astropy.org/en/stable/api/astropy.units.Quantity.htmlastropy.units.Quantity). Since our inputs had a scalar $\theta$ and a 1D array of $k$'s, the computed wave frequencies will be a 1D array of size equal to the size of the $k$ array.
###Code
# compute
omegas = two_fluid_dispersion_solution(**inputs)
(list(omegas.keys()), omegas["fast_mode"], omegas["fast_mode"].shape)
###Output
_____no_output_____
###Markdown
Let's plot the results of each wave mode.
###Code
fs = 14 # default font size
figwidth, figheight = plt.rcParams["figure.figsize"]
figheight = 1.6 * figheight
fig = plt.figure(figsize=[figwidth, figheight])
# normalize data
k_prime = inputs["k"] * params["lpe"]
# plot
plt.plot(
k_prime, np.real(omegas["fast_mode"] / params["wpe"]), "r.", ms=1, label="Fast",
)
ax = plt.gca()
ax.plot(
k_prime, np.real(omegas["alfven_mode"] / params["wpe"]), "b.", ms=1, label="Alfvรจn",
)
ax.plot(
k_prime,
np.real(omegas["acoustic_mode"] / params["wpe"]),
"g.",
ms=1,
label="Acoustic",
)
# adjust axes
ax.set_xlabel(r"$kc / \omega_{pe}$", fontsize=fs)
ax.set_ylabel(r"$Re(\omega / \omega_{pe})$", fontsize=fs)
ax.set_yscale("log")
ax.set_xscale("log")
ax.set_ylim(1e-6, 2e-2)
ax.tick_params(
which="both", direction="in", width=1, labelsize=fs, right=True, length=5,
)
# annotate
text = (
f"$v_A/c_s = {params['va'] / params['cs']:.1f} \qquad "
f"c/v_A = 10^{np.log10(c / params['va']):.0f} \qquad "
f"\\theta = {inputs['theta'].value:.0f}"
"^{\\circ}$"
)
ax.text(0.25, 0.95, text, transform=ax.transAxes, fontsize=18)
ax.legend(loc="upper left", markerscale=5, fontsize=fs)
###Output
_____no_output_____
###Markdown
Wave frequencies on the k-theta planeLet us now look at the distribution of $\omega$ on a $k$-$\theta$ plane.
###Code
# define input parameters
inputs = {
"k": np.linspace(10 ** -7, 10 ** -2, 10000) * u.rad / u.m,
"theta": np.linspace(5, 85, 100) * u.deg,
"n_i": 5 * u.cm ** -3,
"B": 8.3e-9 * u.T,
"T_e": 1.6e6 * u.K,
"T_i": 4.0e5 * u.K,
"ion": Particle("p+"),
}
# a few useful plasma parameters
params = {
"n_e": inputs["n_i"] * abs(inputs["ion"].integer_charge),
"cs": pfp.ion_sound_speed(inputs["T_e"], inputs["T_i"], inputs["ion"],),
"va": pfp.Alfven_speed(inputs["B"], inputs["n_i"], ion=inputs["ion"],),
"wci": pfp.gyrofrequency(inputs["B"], inputs["ion"]),
}
params["lpe"] = pfp.inertial_length(params["n_e"], "e-")
params["wpe"] = pfp.plasma_frequency(params["n_e"], "e-")
###Output
_____no_output_____
###Markdown
Since the $\theta$ and $k$ values are now 1-D arrays, the returned wave frequencies will be 2-D arrays with the first dimension matching the size of $k$ and the second dimension matching the size of $\theta$.
###Code
# compute
omegas = two_fluid_dispersion_solution(**inputs)
(
omegas["fast_mode"].shape,
omegas["fast_mode"].shape[0] == inputs["k"].size,
omegas["fast_mode"].shape[1] == inputs["theta"].size,
)
###Output
_____no_output_____
###Markdown
Let's plot (the fast mode)!
###Code
fs = 14 # default font size
figwidth, figheight = plt.rcParams["figure.figsize"]
figheight = 1.6 * figheight
fig = plt.figure(figsize=[figwidth, figheight])
# normalize data
k_prime = inputs["k"] * params["lpe"]
zdata = np.transpose(np.real(omegas["fast_mode"].value)) / params["wpe"].value
# plot
im = plt.imshow(
zdata,
aspect="auto",
origin="lower",
extent=[
np.min(k_prime.value),
np.max(k_prime.value),
np.min(inputs["theta"].value),
np.max(inputs["theta"].value),
],
interpolation=None,
cmap=plt.cm.Spectral,
)
ax = plt.gca()
# # adjust axes
ax.set_xscale("linear")
ax.set_xlabel(r"$kc/\omega_{pe}$", fontsize=fs)
ax.set_ylabel(r"$\theta$ [$deg.$]", fontsize=fs)
ax.tick_params(
which="both",
direction="in",
width=2,
labelsize=fs,
right=True,
top=True,
length=10,
)
# Add colorbar
divider = make_axes_locatable(ax)
cax = divider.append_axes("top", size="5%", pad=0.07)
cbar = plt.colorbar(
im, cax=cax, orientation="horizontal", ticks=None, fraction=0.05, pad=0.0,
)
cbar.ax.tick_params(
axis="x",
direction="in",
width=2,
length=10,
top=True,
bottom=False,
labelsize=fs,
pad=0.0,
labeltop=True,
labelbottom=False,
)
cbar.ax.xaxis.set_label_position("top")
cbar.set_label(r"$\omega/\omega_{pe}$", fontsize=fs, labelpad=8)
###Output
_____no_output_____
###Markdown
Reproduce Figure 1 from Bellan 2012[bellan2012]: https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2012JA017856Figure 1 of [Bellan 2012][bellan2012] chooses parameters such that $\beta = 0.4$ and $\Lambda=0.4$. Below we define parameters to approximate Bellan's assumptions.
###Code
# define input parameters
inputs = {
"B": 400e-4 * u.T,
"ion": Particle("He+"),
"n_i": 6.358e19 * u.m ** -3,
"T_e": 20 * u.eV,
"T_i": 10 * u.eV,
"theta": np.linspace(0, 90) * u.deg,
"k": (2 * np.pi * u.rad) / (0.56547 * u.m),
}
# a few useful plasma parameters
params = {
"n_e": inputs["n_i"] * abs(inputs["ion"].integer_charge),
"cs": pfp.cs_(inputs["T_e"], inputs["T_i"], inputs["ion"]),
"wci": pfp.wc_(inputs["B"], inputs["ion"]),
"va": pfp.va_(inputs["B"], inputs["n_i"], ion=inputs["ion"]),
}
params["beta"] = (params["cs"] / params["va"]).value ** 2
params["wpe"] = pfp.wp_(params["n_e"], "e-")
params["Lambda"] = (inputs["k"] * params["va"] / params["wci"]).value ** 2
(params["beta"], params["Lambda"])
# compute
omegas = two_fluid_dispersion_solution(**inputs)
# generate data for plots
plt_vals = {}
for mode, arr in omegas.items():
norm = (np.absolute(arr) / (inputs["k"] * params["va"])).value ** 2
plt_vals[mode] = {
"x": norm * np.sin(inputs["theta"].to(u.rad).value),
"y": norm * np.cos(inputs["theta"].to(u.rad).value),
}
fs = 14 # default font size
figwidth, figheight = plt.rcParams["figure.figsize"]
figheight = 1.6 * figheight
fig = plt.figure(figsize=[figwidth, figheight])
# Fast mode
plt.plot(
plt_vals["fast_mode"]["x"], plt_vals["fast_mode"]["y"], linewidth=2, label="Fast",
)
ax = plt.gca()
# adjust axes
ax.set_xlabel(r"$(\omega / k v_A)^2 \, \sin \theta$", fontsize=fs)
ax.set_ylabel(r"$(\omega / k v_A)^2 \, \cos \theta$", fontsize=fs)
ax.set_xlim(0.0, 1.5)
ax.set_ylim(0.0, 2.0)
for spine in ax.spines.values():
spine.set_linewidth(2)
ax.minorticks_on()
ax.tick_params(which="both", labelsize=fs, width=2)
ax.tick_params(which="major", length=10)
ax.tick_params(which="minor", length=5)
ax.xaxis.set_major_locator(MultipleLocator(0.5))
ax.xaxis.set_minor_locator(MultipleLocator(0.1))
ax.yaxis.set_major_locator(MultipleLocator(0.5))
ax.yaxis.set_minor_locator(MultipleLocator(0.1))
# Alfven mode
plt.plot(
plt_vals["alfven_mode"]["x"],
plt_vals["alfven_mode"]["y"],
linewidth=2,
label="Alfv$\`{e}$n",
)
# Acoustic mode
plt.plot(
plt_vals["acoustic_mode"]["x"],
plt_vals["acoustic_mode"]["y"],
linewidth=2,
label="Acoustic",
)
# annotations
plt.legend(fontsize=fs, loc="upper right")
###Output
_____no_output_____ |
tests/test_9/dog_app.ipynb | ###Markdown
Convolutional Neural Networks Project: Write an Algorithm for a Dog Identification App ---In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully! > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.The rubric contains _optional_ "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. If you decide to pursue the "Stand Out Suggestions", you should include the code in this Jupyter notebook.--- Why We're Here In this notebook, you will make the first steps towards developing an algorithm that could be used as part of a mobile or web app. At the end of this project, your code will accept any user-supplied image as input. If a dog is detected in the image, it will provide an estimate of the dog's breed. If a human is detected, it will provide an estimate of the dog breed that is most resembling. The image below displays potential sample output of your finished project (... but we expect that each student's algorithm will behave differently!). In this real-world setting, you will need to piece together a series of models to perform different tasks; for instance, the algorithm that detects humans in an image will be different from the CNN that infers dog breed. There are many points of possible failure, and no perfect algorithm exists. Your imperfect solution will nonetheless create a fun user experience! The Road AheadWe break the notebook into separate steps. 
Feel free to use the links below to navigate the notebook.* [Step 0](step0): Import Datasets* [Step 1](step1): Detect Humans* [Step 2](step2): Detect Dogs* [Step 3](step3): Create a CNN to Classify Dog Breeds (from Scratch)* [Step 4](step4): Create a CNN to Classify Dog Breeds (using Transfer Learning)* [Step 5](step5): Write your Algorithm* [Step 6](step6): Test Your Algorithm--- Step 0: Import DatasetsMake sure that you've downloaded the required human and dog datasets:**Note: if you are using the Udacity workspace, you *DO NOT* need to re-download these - they can be found in the `/data` folder as noted in the cell below.*** Download the [dog dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/dogImages.zip). Unzip the folder and place it in this project's home directory, at the location `/dog_images`. * Download the [human dataset](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/lfw.zip). Unzip the folder and place it in the home directory, at location `/lfw`. *Note: If you are using a Windows machine, you are encouraged to use [7zip](http://www.7-zip.org/) to extract the folder.*In the code cell below, we save the file paths for both the human (LFW) dataset and dog dataset in the numpy arrays `human_files` and `dog_files`.
###Code
import numpy as np
from glob import glob
# load filenames for human and dog images
human_files = np.array(glob("/data/lfw/*/*"))
dog_files = np.array(glob("/data/dog_images/*/*/*"))
# print number of images in each dataset
print('There are %d total human images.' % len(human_files))
print('There are %d total dog images.' % len(dog_files))
###Output
There are 13233 total human images.
There are 8351 total dog images.
###Markdown
Step 1: Detect HumansIn this section, we use OpenCV's implementation of [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) to detect human faces in images. OpenCV provides many pre-trained face detectors, stored as XML files on [github](https://github.com/opencv/opencv/tree/master/data/haarcascades). We have downloaded one of these detectors and stored it in the `haarcascades` directory. In the next code cell, we demonstrate how to use this detector to find human faces in a sample image.
###Code
import cv2
import matplotlib.pyplot as plt
%matplotlib inline
# extract pre-trained face detector
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')
# load color (BGR) image
img = cv2.imread(human_files[0])
# convert BGR image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# find faces in image
faces = face_cascade.detectMultiScale(gray)
# print number of faces detected in the image
print('Number of faces detected:', len(faces))
# get bounding box for each detected face
for (x,y,w,h) in faces:
# add bounding box to color image
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
# convert BGR image to RGB for plotting
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# display the image, along with bounding box
plt.imshow(cv_rgb)
plt.show()
###Output
Number of faces detected: 1
###Markdown
Before using any of the face detectors, it is standard procedure to convert the images to grayscale. The `detectMultiScale` function executes the classifier stored in `face_cascade` and takes the grayscale image as a parameter. In the above code, `faces` is a numpy array of detected faces, where each row corresponds to a detected face. Each detected face is a 1D array with four entries that specifies the bounding box of the detected face. The first two entries in the array (extracted in the above code as `x` and `y`) specify the horizontal and vertical positions of the top left corner of the bounding box. The last two entries in the array (extracted here as `w` and `h`) specify the width and height of the box. Write a Human Face DetectorWe can use this procedure to write a function that returns `True` if a human face is detected in an image and `False` otherwise. This function, aptly named `face_detector`, takes a string-valued file path to an image as input and appears in the code block below.
###Code
# returns "True" if face is detected in image stored at img_path
def face_detector(img_path):
img = cv2.imread(img_path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray)
return len(faces) > 0
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Human Face Detector__Question 1:__ Use the code cell below to test the performance of the `face_detector` function. - What percentage of the first 100 images in `human_files` have a detected human face? - What percentage of the first 100 images in `dog_files` have a detected human face? Ideally, we would like 100% of human images with a detected face and 0% of dog images with a detected face. You will see that our algorithm falls short of this goal, but still gives acceptable performance. We extract the file paths for the first 100 images from each of the datasets and store them in the numpy arrays `human_files_short` and `dog_files_short`. __Answer:__ (You can print out your results and/or write your percentages in this cell)
###Code
from tqdm import tqdm
human_files_short = human_files[:100]
dog_files_short = dog_files[:100]
#-#-# Do NOT modify the code above this line. #-#-#
## TODO: Test the performance of the face_detector algorithm
## on the images in human_files_short and dog_files_short.
human_faces = []
dog_faces = []
for human_image in human_files_short:
human_faces.append(face_detector(human_image))
for dog_image in dog_files_short:
dog_faces.append(face_detector(dog_image))
human_detected_faces = sum(human_faces)
dog_detected_faces = sum(dog_faces)
# with 100 images in each set, report the detection rates as percentages
print('Percentage of human images with a detected human face: {:.1f}%'.format(
    100 * human_detected_faces / len(human_files_short)))
print('Percentage of dog images with a detected human face: {:.1f}%'.format(
    100 * dog_detected_faces / len(dog_files_short)))
###Output
_____no_output_____
###Markdown
We suggest the face detector from OpenCV as a potential way to detect human images in your algorithm, but you are free to explore other approaches, especially approaches that make use of deep learning :). Please use the code cell below to design and test your own face detection algorithm. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Test the performance of another face detection algorithm.
### Feel free to use as many code cells as needed.
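### One possible deep-learning alternative (a sketch, not part of the original
### submission): the MTCNN face detector from the third-party `facenet-pytorch`
### package. This assumes that package is installed (`pip install facenet-pytorch`);
### the helper name below is illustrative.
import torch
from PIL import Image
from facenet_pytorch import MTCNN

mtcnn = MTCNN(keep_all=True, device="cuda" if torch.cuda.is_available() else "cpu")

def mtcnn_face_detector(img_path):
    # MTCNN.detect returns (boxes, probabilities); boxes is None when no face is found
    boxes, _ = mtcnn.detect(Image.open(img_path).convert("RGB"))
    return boxes is not None and len(boxes) > 0

# It can then be assessed exactly like face_detector above, e.g. by summing
# mtcnn_face_detector(f) over human_files_short and dog_files_short.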
###Output
_____no_output_____
###Markdown
--- Step 2: Detect DogsIn this section, we use a [pre-trained model](http://pytorch.org/docs/master/torchvision/models.html) to detect dogs in images. Obtain Pre-trained VGG-16 ModelThe code cell below downloads the VGG-16 model, along with weights that have been trained on [ImageNet](http://www.image-net.org/), a very large, very popular dataset used for image classification and other vision tasks. ImageNet contains over 10 million URLs, each linking to an image containing an object from one of [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a).
###Code
import torch
import torchvision.models as models
# define VGG16 model
VGG16 = models.vgg16(pretrained=True)
# check if CUDA is available
use_cuda = torch.cuda.is_available()
# move model to GPU if CUDA is available
if use_cuda:
VGG16 = VGG16.cuda()
###Output
Downloading: "https://download.pytorch.org/models/vgg16-397923af.pth" to /root/.torch/models/vgg16-397923af.pth
100%|██████████| 553433881/553433881 [00:05<00:00, 102387072.20it/s]
###Markdown
Given an image, this pre-trained VGG-16 model returns a prediction (derived from the 1000 possible categories in ImageNet) for the object that is contained in the image. (IMPLEMENTATION) Making Predictions with a Pre-trained ModelIn the next code cell, you will write a function that accepts a path to an image (such as `'dogImages/train/001.Affenpinscher/Affenpinscher_00001.jpg'`) as input and returns the index corresponding to the ImageNet class that is predicted by the pre-trained VGG-16 model. The output should always be an integer between 0 and 999, inclusive.Before writing the function, make sure that you take the time to learn how to appropriately pre-process tensors for pre-trained models in the [PyTorch documentation](http://pytorch.org/docs/stable/torchvision/models.html).
###Code
from PIL import Image
import torchvision.transforms as transforms
def VGG16_predict(img_path):
'''
Use pre-trained VGG-16 model to obtain index corresponding to
predicted ImageNet class for image at specified path
Args:
img_path: path to an image
Returns:
Index corresponding to VGG-16 model's prediction
'''
## TODO: Complete the function.
## Load and pre-process an image from the given img_path
## Return the *index* of the predicted class for that image
transform = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
raw_img = Image.open(img_path)
cleaned_img = transform(raw_img)
    batch_img = cleaned_img.unsqueeze(0)
    if use_cuda:
        batch_img = batch_img.cuda()
    output = VGG16(batch_img)
    predicted_idx = torch.argmax(output).item()
    return predicted_idx # predicted class index
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Write a Dog DetectorWhile looking at the [dictionary](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a), you will notice that the categories corresponding to dogs appear in an uninterrupted sequence and correspond to dictionary keys 151-268, inclusive, to include all categories from `'Chihuahua'` to `'Mexican hairless'`. Thus, in order to check to see if an image is predicted to contain a dog by the pre-trained VGG-16 model, we need only check if the pre-trained model predicts an index between 151 and 268 (inclusive).Use these ideas to complete the `dog_detector` function below, which returns `True` if a dog is detected in an image (and `False` if not).
###Code
### returns "True" if a dog is detected in the image stored at img_path
def dog_detector(img_path):
## TODO: Complete the function.
idx = VGG16_predict(img_path)
#print(idx)
if idx >= 151 and idx <= 268:
return True
return False # true/false
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Assess the Dog Detector__Question 2:__ Use the code cell below to test the performance of your `dog_detector` function. - What percentage of the images in `human_files_short` have a detected dog? - What percentage of the images in `dog_files_short` have a detected dog? __Answer:__
###Code
### TODO: Test the performance of the dog_detector function
### on the images in human_files_short and dog_files_short.
human_faces = []
dog_faces = []
for human_image in human_files_short:
human_faces.append(dog_detector(human_image))
for dog_image in dog_files_short:
dog_faces.append(dog_detector(dog_image))
human_detected_faces = sum(human_faces)
dog_detected_faces = sum(dog_faces)
print('Percentage of Human Faces',human_detected_faces / len(human_files_short))
print('Percentage of Dog Faces',dog_detected_faces / len(dog_files_short))
###Output
Percentage of Human Faces 0.0
Percentage of Dog Faces 1.0
###Markdown
We suggest VGG-16 as a potential network to detect dog images in your algorithm, but you are free to explore other pre-trained networks (such as [Inception-v3](http://pytorch.org/docs/master/torchvision/models.htmlinception-v3), [ResNet-50](http://pytorch.org/docs/master/torchvision/models.htmlid3), etc). Please use the code cell below to test other pre-trained PyTorch models. If you decide to pursue this _optional_ task, report performance on `human_files_short` and `dog_files_short`.
###Code
### (Optional)
### TODO: Report the performance of another pre-trained network.
### Feel free to use as many code cells as needed.
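### A hedged, optional sketch (not part of the original submission): ResNet-50 is another
### ImageNet-pretrained torchvision model and shares the same dog index range 151-268.
resnet50 = models.resnet50(pretrained=True)
if use_cuda:
    resnet50 = resnet50.cuda()
resnet50.eval()
def resnet50_dog_detector(img_path):
    # reuse the same preprocessing pipeline defined for VGG16_predict above
    img = transform(Image.open(img_path).convert('RGB')).unsqueeze(0)
    if use_cuda:
        img = img.cuda()
    idx = torch.argmax(resnet50(img)).item()
    return 151 <= idx <= 268
print('ResNet-50 dog detection rate on dog_files_short:',
      sum(resnet50_dog_detector(f) for f in dog_files_short) / len(dog_files_short))
print('ResNet-50 dog detection rate on human_files_short:',
      sum(resnet50_dog_detector(f) for f in human_files_short) / len(human_files_short))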
###Output
_____no_output_____
###Markdown
--- Step 3: Create a CNN to Classify Dog Breeds (from Scratch)Now that we have functions for detecting humans and dogs in images, we need a way to predict breed from images. In this step, you will create a CNN that classifies dog breeds. You must create your CNN _from scratch_ (so, you can't use transfer learning _yet_!), and you must attain a test accuracy of at least 10%. In Step 4 of this notebook, you will have the opportunity to use transfer learning to create a CNN that attains greatly improved accuracy.We mention that the task of assigning breed to dogs from images is considered exceptionally challenging. To see why, consider that *even a human* would have trouble distinguishing between a Brittany and a Welsh Springer Spaniel. Brittany | Welsh Springer Spaniel- | - | It is not difficult to find other dog breed pairs with minimal inter-class variation (for instance, Curly-Coated Retrievers and American Water Spaniels). Curly-Coated Retriever | American Water Spaniel- | - | Likewise, recall that labradors come in yellow, chocolate, and black. Your vision-based algorithm will have to conquer this high intra-class variation to determine how to classify all of these different shades as the same breed. Yellow Labrador | Chocolate Labrador | Black Labrador- | - | | We also mention that random chance presents an exceptionally low bar: setting aside the fact that the classes are slightly imabalanced, a random guess will provide a correct answer roughly 1 in 133 times, which corresponds to an accuracy of less than 1%. Remember that the practice is far ahead of the theory in deep learning. Experiment with many different architectures, and trust your intuition. And, of course, have fun! (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/stable/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dog_images/train`, `dog_images/valid`, and `dog_images/test`, respectively). You may find [this documentation on custom datasets](http://pytorch.org/docs/stable/torchvision/datasets.html) to be a useful resource. If you are interested in augmenting your training and/or validation data, check out the wide variety of [transforms](http://pytorch.org/docs/stable/torchvision/transforms.html?highlight=transform)!
###Code
import os
from torchvision import datasets
import torch.utils.data as data
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
### TODO: Write data loaders for training, validation, and test sets
## Specify appropriate transforms, and batch_sizes
BATCH_SIZE = 20
transform = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
train_dir = os.path.join('dogImages','train')
val_dir = os.path.join('dogImages','valid')
test_dir = os.path.join('dogImages','test')
train_dataset = datasets.ImageFolder(train_dir, transform=transform)
val_dataset = datasets.ImageFolder(val_dir, transform=transform)
test_dataset = datasets.ImageFolder(test_dir, transform=transform)
train_loader = data.DataLoader(dataset=train_dataset,
shuffle=True,
batch_size=BATCH_SIZE)
val_loader = data.DataLoader(dataset=val_dataset,
shuffle=True,
batch_size=BATCH_SIZE)
test_loader = data.DataLoader(dataset=test_dataset,
shuffle=True,
batch_size=BATCH_SIZE)
loaders_scratch = {'train': train_loader,
'valid':val_loader,
'test':test_loader}
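## A hedged example for Question 3 (not used above): train-time augmentation could replace
## `transform` for the training set only, e.g.
# train_transform = transforms.Compose([transforms.RandomResizedCrop(224),
#                                        transforms.RandomHorizontalFlip(),
#                                        transforms.RandomRotation(10),
#                                        transforms.ToTensor(),
#                                        transforms.Normalize(mean=[0.485, 0.456, 0.406],
#                                                             std=[0.229, 0.224, 0.225])])
# train_dataset = datasets.ImageFolder(train_dir, transform=train_transform)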
###Output
_____no_output_____
###Markdown
**Question 3:** Describe your chosen procedure for preprocessing the data. - How does your code resize the images (by cropping, stretching, etc)? What size did you pick for the input tensor, and why?- Did you decide to augment the dataset? If so, how (through translations, flips, rotations, etc)? If not, why not? **Answer**: (IMPLEMENTATION) Model ArchitectureCreate a CNN to classify dog breed. Use the template in the code cell below.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the CNN architecture
class Net(nn.Module):
### TODO: choose an architecture, and complete the class
def __init__(self):
super(Net, self).__init__()
## Define layers of a CNN
self.conv1 = nn.Conv2d(3, 32, 5)
# outputs (32,220,220)
self.pool = nn.MaxPool2d(2,2)
# first layer output after pool
# (32,110,110)
self.conv2 = nn.Conv2d(32,64,5)
#outputs (64,106,106)
# after max pool
# (64,53,53)
self.conv3 = nn.Conv2d(64,128,5)
# after max pool
# (128,24,24)
self.conv4 = nn.Conv2d(128,256,5)
#after max pool
# (256,10,10)
self.conv5 = nn.Conv2d(256,512,5)
#after max pool
# (512,3,3)
self.dropout = nn.Dropout(0.4)
self.fc1 = nn.Linear(512*3*3,2304)
# 133 dog breeds
# as we have 133 folders
self.fc2 = nn.Linear(2304,133)
def forward(self, x):
## Define forward behavior
x = self.pool(F.relu(self.conv1(x)))
#x = self.dropout(x)
x = self.pool(F.relu(self.conv2(x)))
#x = self.dropout(x)
x = self.pool(F.relu(self.conv3(x)))
#x = self.dropout(x)
x = self.pool(F.relu(self.conv4(x)))
x = self.pool(F.relu(self.conv5(x)))
x = self.dropout(x)
x = x.view(-1, 512*3*3)
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = self.fc2(x)
return x
#-#-# You so NOT have to modify the code below this line. #-#-#
# instantiate the CNN
model_scratch = Net()
# move tensors to GPU if CUDA is available
if use_cuda:
model_scratch.cuda()
###Output
_____no_output_____
###Markdown
__Question 4:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/stable/optim.html). Save the chosen loss function as `criterion_scratch`, and the optimizer as `optimizer_scratch` below.
###Code
import torch.optim as optim
### TODO: select loss function
criterion_scratch = nn.CrossEntropyLoss().cuda() if torch.cuda.is_available() else nn.CrossEntropyLoss()
### TODO: select optimizer
optimizer_scratch = optim.SGD(model_scratch.parameters(), lr = 0.06,weight_decay=1e-3)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_scratch.pt'`.
###Code
def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
"""returns trained model"""
# initialize tracker for minimum validation loss
valid_loss_min = np.Inf
for epoch in range(1, n_epochs+1):
# initialize variables to monitor training and validation loss
train_loss = 0.0
valid_loss = 0.0
###################
# train the model #
###################
model.train()
for batch_idx, (data, target) in enumerate(loaders['train']):
# move to GPU
if use_cuda:
data, target = data.cuda(), target.cuda()
## find the loss and update the model parameters accordingly
## record the average training loss, using something like
## train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
model.zero_grad()
#print(target)
output = model(data)
#print(output)
            loss = criterion(output, target)  # use the criterion passed in, not the module-level one
train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.data - train_loss))
# Backward pass.
loss.backward()
# Update the parameters in the optimizer.
optimizer.step()
if batch_idx % 100 == 0:
print('Epoch %d, Batch %d loss: %.6f' % (epoch, batch_idx + 1, train_loss))
######################
# validate the model #
######################
model.eval()
for batch_idx, (data, target) in enumerate(loaders['valid']):
# move to GPU
if use_cuda:
data, target = data.cuda(), target.cuda()
output = model(data)
            loss = criterion(output, target)
## update the average validation loss
valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.data - valid_loss))
# print training/validation statistics
print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
epoch,
train_loss,
valid_loss
))
## TODO: save the model if validation loss has decreased
if valid_loss < valid_loss_min:
torch.save(model.state_dict(), save_path)
valid_loss_min = valid_loss
# return trained model
return model
# train the model
model_scratch = train(10, loaders_scratch, model_scratch, optimizer_scratch,
criterion_scratch, use_cuda, 'model_scratch.pt')
# load the model that got the best validation accuracy
#model_scratch.load_state_dict(torch.load('model_scratch.pt'))
###Output
Epoch 1, Batch 1 loss: 4.889373
Epoch 1, Batch 101 loss: 4.887956
Epoch 1, Batch 201 loss: 4.885204
Epoch 1, Batch 301 loss: 4.869027
Epoch: 1 Training Loss: 4.867319 Validation Loss: 4.861140
Epoch 2, Batch 1 loss: 4.836076
Epoch 2, Batch 101 loss: 4.868313
Epoch 2, Batch 201 loss: 4.864567
Epoch 2, Batch 301 loss: 4.865156
Epoch: 2 Training Loss: 4.866021 Validation Loss: 4.858485
Epoch 3, Batch 1 loss: 4.860460
Epoch 3, Batch 101 loss: 4.853211
Epoch 3, Batch 201 loss: 4.842324
Epoch 3, Batch 301 loss: 4.832800
Epoch: 3 Training Loss: 4.834305 Validation Loss: 4.878834
Epoch 4, Batch 1 loss: 4.880487
Epoch 4, Batch 101 loss: 4.876013
Epoch 4, Batch 201 loss: 4.871992
Epoch 4, Batch 301 loss: 4.846231
Epoch: 4 Training Loss: 4.841362 Validation Loss: 4.808552
Epoch 5, Batch 1 loss: 4.899741
Epoch 5, Batch 101 loss: 4.825074
Epoch 5, Batch 201 loss: 4.785785
Epoch 5, Batch 301 loss: 4.774435
Epoch: 5 Training Loss: 4.770420 Validation Loss: 4.633053
Epoch 6, Batch 1 loss: 4.295796
Epoch 6, Batch 101 loss: 4.830859
Epoch 6, Batch 201 loss: 4.798131
Epoch 6, Batch 301 loss: 4.781902
Epoch: 6 Training Loss: 4.779203 Validation Loss: 4.819456
Epoch 7, Batch 1 loss: 4.832332
Epoch 7, Batch 101 loss: 4.686429
Epoch 7, Batch 201 loss: 4.726315
Epoch 7, Batch 301 loss: 4.706894
Epoch: 7 Training Loss: 4.718076 Validation Loss: 4.736145
Epoch 8, Batch 1 loss: 4.572536
Epoch 8, Batch 101 loss: 4.659453
Epoch 8, Batch 201 loss: 4.636588
Epoch 8, Batch 301 loss: 4.621697
Epoch: 8 Training Loss: 4.620641 Validation Loss: 4.567732
Epoch 9, Batch 1 loss: 4.688214
Epoch 9, Batch 101 loss: 4.574794
Epoch 9, Batch 201 loss: 4.546075
Epoch 9, Batch 301 loss: 4.555486
Epoch: 9 Training Loss: 4.554761 Validation Loss: 4.736269
Epoch 10, Batch 1 loss: 4.636405
Epoch 10, Batch 101 loss: 4.484521
Epoch 10, Batch 201 loss: 4.473972
Epoch 10, Batch 301 loss: 4.457046
Epoch: 10 Training Loss: 4.453424 Validation Loss: 4.554268
###Markdown
(IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 10%.
###Code
def test(loaders, model, criterion, use_cuda):
# monitor test loss and accuracy
test_loss = 0.
correct = 0.
total = 0.
model.eval()
for batch_idx, (data, target) in enumerate(loaders['test']):
# move to GPU
if use_cuda:
data, target = data.cuda(), target.cuda()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update average test loss
test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
# convert output probabilities to predicted class
pred = output.data.max(1, keepdim=True)[1]
# compare predictions to true label
correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
total += data.size(0)
print('Test Loss: {:.6f}\n'.format(test_loss))
print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
100. * correct / total, correct, total))
# call test function
test(loaders_scratch, model_scratch, criterion_scratch, use_cuda)
###Output
_____no_output_____
###Markdown
--- Step 4: Create a CNN to Classify Dog Breeds (using Transfer Learning)You will now use transfer learning to create a CNN that can identify dog breed from images. Your CNN must attain at least 60% accuracy on the test set. (IMPLEMENTATION) Specify Data Loaders for the Dog DatasetUse the code cell below to write three separate [data loaders](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader) for the training, validation, and test datasets of dog images (located at `dogImages/train`, `dogImages/valid`, and `dogImages/test`, respectively). If you like, **you are welcome to use the same data loaders from the previous step**, when you created a CNN from scratch.
###Code
## TODO: Specify data loaders
BATCH_SIZE = 32
transform = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
train_dir = os.path.join('dogImages','train')
val_dir = os.path.join('dogImages','valid')
test_dir = os.path.join('dogImages','test')
train_dataset = datasets.ImageFolder(train_dir, transform=transform)
val_dataset = datasets.ImageFolder(val_dir, transform=transform)
test_dataset = datasets.ImageFolder(test_dir, transform=transform)
train_loader = data.DataLoader(dataset=train_dataset,
shuffle=True,
batch_size=BATCH_SIZE)
val_loader = data.DataLoader(dataset=val_dataset,
shuffle=True,
batch_size=BATCH_SIZE)
test_loader = data.DataLoader(dataset=test_dataset,
                              shuffle=True,
                              batch_size=BATCH_SIZE)
loaders_transfer = {'train': train_loader,
                    'valid': val_loader,
                    'test': test_loader}
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Model ArchitectureUse transfer learning to create a CNN to classify dog breed. Use the code cell below, and save your initialized model as the variable `model_transfer`.
###Code
import torchvision.models as models
import torch.nn as nn
## TODO: Specify model architecture
class MyNet(nn.Module):
def __init__(self):
super(MyNet, self).__init__()
VGG16 = models.vgg16(pretrained=True)
        for param in VGG16.parameters():
            param.requires_grad = False
        modules = list(VGG16.children())[:-1]
        self.VGG16 = nn.Sequential(*modules)
        # the frozen VGG-16 feature extractor yields 512 x 7 x 7 feature maps for 224 x 224 inputs
        self.fc = nn.Linear(512 * 7 * 7, 133)
def forward(self, images):
features = self.VGG16(images)
features = features.view(features.size(0), -1)
features = self.fc(features)
return features
model_transfer = MyNet()
if use_cuda:
model_transfer = model_transfer.cuda()
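## A common, equivalent alternative (hedged sketch, not the model used above): keep the full VGG-16
## and only swap its final classifier layer for a 133-way output.
# model_transfer = models.vgg16(pretrained=True)
# for param in model_transfer.features.parameters():
#     param.requires_grad = False
# model_transfer.classifier[6] = nn.Linear(model_transfer.classifier[6].in_features, 133)
# if use_cuda:
#     model_transfer = model_transfer.cuda()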
###Output
_____no_output_____
###Markdown
__Question 5:__ Outline the steps you took to get to your final CNN architecture and your reasoning at each step. Describe why you think the architecture is suitable for the current problem. __Answer:__ (IMPLEMENTATION) Specify Loss Function and OptimizerUse the next code cell to specify a [loss function](http://pytorch.org/docs/master/nn.htmlloss-functions) and [optimizer](http://pytorch.org/docs/master/optim.html). Save the chosen loss function as `criterion_transfer`, and the optimizer as `optimizer_transfer` below.
###Code
criterion_transfer = nn.CrossEntropyLoss().cuda() if torch.cuda.is_available() else nn.CrossEntropyLoss()
optimizer_transfer = optim.Adam(model_transfer.fc.parameters())  # only the new, unfrozen layer is optimized
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Train and Validate the ModelTrain and validate your model in the code cell below. [Save the final model parameters](http://pytorch.org/docs/master/notes/serialization.html) at filepath `'model_transfer.pt'`.
###Code
# train the model
model_transfer = train(3, loaders_transfer, model_transfer, optimizer_transfer, criterion_transfer, use_cuda, 'model_transfer.pt')
# load the model that got the best validation accuracy (uncomment the line below)
#model_transfer.load_state_dict(torch.load('model_transfer.pt'))
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Test the ModelTry out your model on the test dataset of dog images. Use the code cell below to calculate and print the test loss and accuracy. Ensure that your test accuracy is greater than 60%.
###Code
test(loaders_transfer, model_transfer, criterion_transfer, use_cuda)
###Output
_____no_output_____
###Markdown
(IMPLEMENTATION) Predict Dog Breed with the ModelWrite a function that takes an image path as input and returns the dog breed (`Affenpinscher`, `Afghan hound`, etc) that is predicted by your model.
###Code
### TODO: Write a function that takes a path to an image as input
### and returns the dog breed that is predicted by the model.
# list of class names by index, i.e. a name can be accessed like class_names[0]
class_names = [item[4:].replace("_", " ") for item in train_dataset.classes]
def predict_breed_transfer(img_path):
    # load the image, apply the same preprocessing as the data loaders, and return the predicted breed
    img = transform(Image.open(img_path).convert('RGB')).unsqueeze(0)
    if use_cuda:
        img = img.cuda()
    idx = torch.argmax(model_transfer(img)).item()
    return class_names[idx]
###Output
_____no_output_____
###Markdown
--- Step 5: Write your AlgorithmWrite an algorithm that accepts a file path to an image and first determines whether the image contains a human, dog, or neither. Then,- if a __dog__ is detected in the image, return the predicted breed.- if a __human__ is detected in the image, return the resembling dog breed.- if __neither__ is detected in the image, provide output that indicates an error.You are welcome to write your own functions for detecting humans and dogs in images, but feel free to use the `face_detector` and `human_detector` functions developed above. You are __required__ to use your CNN from Step 4 to predict dog breed. Some sample output for our algorithm is provided below, but feel free to design your own user experience! (IMPLEMENTATION) Write your Algorithm
###Code
### TODO: Write your algorithm.
### Feel free to use as many code cells as needed.
def run_app(img_path):
## handle cases for a human face, dog, and neither
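    ## a minimal hedged sketch, reusing the detectors and breed classifier defined above
    if dog_detector(img_path):
        print('Dog detected! Predicted breed:', predict_breed_transfer(img_path))
    elif face_detector(img_path):
        print('Hello, human! You resemble a(n):', predict_breed_transfer(img_path))
    else:
        print('Error: neither a dog nor a human face was detected in', img_path)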
###Output
_____no_output_____
###Markdown
--- Step 6: Test Your AlgorithmIn this section, you will take your new algorithm for a spin! What kind of dog does the algorithm think that _you_ look like? If you have a dog, does it predict your dog's breed accurately? If you have a cat, does it mistakenly think that your cat is a dog? (IMPLEMENTATION) Test Your Algorithm on Sample Images!Test your algorithm at least six images on your computer. Feel free to use any images you like. Use at least two human and two dog images. __Question 6:__ Is the output better than you expected :) ? Or worse :( ? Provide at least three possible points of improvement for your algorithm. __Answer:__ (Three possible points for improvement)
###Code
## TODO: Execute your algorithm from Step 6 on
## at least 6 images on your computer.
## Feel free to use as many code cells as needed.
## suggested code, below
for file in np.hstack((human_files[:3], dog_files[:3])):
run_app(file)
###Output
_____no_output_____ |
2.2) CNN Models - Test Cases.ipynb | ###Markdown
2.2 CNN Models - Test CasesThe trained CNN model was applied to a hold-out test set of 10,873 images. The network obtained 0.743 and 0.997 AUC-PRC on the hold-out test set for cored plaque and diffuse plaque, respectively.
###Code
import time, os
import torch
torch.manual_seed(42)
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import transforms
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
CSV_DIR = 'data/CSVs/test.csv'
MODEL_DIR = 'models/CNN_model_parameters.pkl'
IMG_DIR = 'data/tiles/hold-out/'
NEGATIVE_DIR = 'data/seg/negatives/'
SAVE_DIR = 'data/outputs/'
if not os.path.exists(SAVE_DIR):
os.makedirs(SAVE_DIR)
batch_size = 32
num_workers = 8
norm = np.load('utils/normalization.npy', allow_pickle=True).item()
from torch.utils.data import Dataset
from PIL import Image
class MultilabelDataset(Dataset):
def __init__(self, csv_path, img_path, transform=None):
"""
Args:
csv_path (string): path to csv file
img_path (string): path to the folder where images are
transform: pytorch transforms for transforms and tensor conversion
"""
self.data_info = pd.read_csv(csv_path)
self.img_path = img_path
self.transform = transform
c=torch.Tensor(self.data_info.loc[:,'cored'])
d=torch.Tensor(self.data_info.loc[:,'diffuse'])
a=torch.Tensor(self.data_info.loc[:,'CAA'])
c=c.view(c.shape[0],1)
d=d.view(d.shape[0],1)
a=a.view(a.shape[0],1)
self.raw_labels = torch.cat([c,d,a], dim=1)
self.labels = (torch.cat([c,d,a], dim=1)>0.99).type(torch.FloatTensor)
def __getitem__(self, index):
# Get label(class) of the image based on the cropped pandas column
single_image_label = self.labels[index]
raw_label = self.raw_labels[index]
# Get image name from the pandas df
single_image_name = str(self.data_info.loc[index,'imagename'])
# Open image
try:
img_as_img = Image.open(self.img_path + single_image_name)
except:
img_as_img = Image.open(NEGATIVE_DIR + single_image_name)
# Transform image to tensor
if self.transform is not None:
img_as_img = self.transform(img_as_img)
# Return image and the label
return (img_as_img, single_image_label, raw_label, single_image_name)
def __len__(self):
return len(self.data_info.index)
data_transforms = {
'test' : transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(norm['mean'], norm['std'])
])
}
image_datasets = {'test': MultilabelDataset(CSV_DIR, IMG_DIR,
data_transforms['test'])}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],
batch_size=batch_size,
shuffle=False,
num_workers=num_workers)
for x in ['test']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['test']}
image_classes = ['cored','diffuse','CAA']
use_gpu = torch.cuda.is_available()
def imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array(norm['mean'])
std = np.array(norm['std'])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.figure()
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
# Get a batch of training data
inputs, labels, raw_labels, names = next(iter(dataloaders['test']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
imshow(out)
class Net(nn.Module):
def __init__(self, fc_nodes=512, num_classes=3, dropout=0.5):
super(Net, self).__init__()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def dev_model(model, criterion, phase='test', gpu_id=None):
phase = phase
since = time.time()
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size,
shuffle=False, num_workers=num_workers)
for x in [phase]}
model.train(False)
running_loss = 0.0
running_corrects = torch.zeros(len(image_classes))
running_preds = torch.Tensor(0)
running_predictions = torch.Tensor(0)
running_labels = torch.Tensor(0)
running_raw_labels = torch.Tensor(0)
# Iterate over data.
step = 0
for data in dataloaders[phase]:
step += 1
# get the inputs
inputs, labels, raw_labels, names = data
running_labels = torch.cat([running_labels, labels])
running_raw_labels = torch.cat([running_raw_labels, raw_labels])
# wrap them in Variable
if use_gpu:
inputs = Variable(inputs.cuda(gpu_id))
labels = Variable(labels.cuda(gpu_id))
else:
inputs, labels = Variable(inputs), Variable(labels)
# forward
outputs = model(inputs)
        preds = torch.sigmoid(outputs)  # probability for each class
#print(preds)
if use_gpu:
predictions = (preds>0.5).type(torch.cuda.FloatTensor)
else:
predictions = (preds>0.5).type(torch.FloatTensor)
loss = criterion(outputs, labels)
preds = preds.data.cpu()
predictions = predictions.data.cpu()
labels = labels.data.cpu()
# statistics
running_loss += loss.data[0]
running_corrects += torch.sum(predictions==labels, 0).type(torch.FloatTensor)
running_preds = torch.cat([running_preds, preds])
running_predictions = torch.cat([running_predictions, predictions])
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects / dataset_sizes[phase]
print('{} Loss: {:.4f}\n Cored: {:.4f} Diffuse: {:.4f} CAA: {:.4f}'.format(
phase, epoch_loss, epoch_acc[0], epoch_acc[1], epoch_acc[2]))
print()
time_elapsed = time.time() - since
print('Prediction complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
return epoch_acc, running_preds, running_predictions, running_labels
from sklearn.metrics import roc_curve, auc, precision_recall_curve
def plot_roc(preds, label, image_classes, size=20, path=None):
colors = ['pink','c','deeppink', 'b', 'g', 'm', 'y', 'r', 'k']
fig = plt.figure(figsize=(1.2*size, size))
ax = plt.axes()
for i in range(preds.shape[1]):
fpr, tpr, _ = roc_curve(label[:,i].ravel(), preds[:,i].ravel())
lw = 0.2*size
# Plot all ROC curves
ax.plot([0, 1], [0, 1], 'k--', lw=lw, label='random')
ax.plot(fpr, tpr,
label='ROC-curve of {}'.format(image_classes[i])+ '( area = {0:0.3f})'
''.format(auc(fpr, tpr)),
color=colors[(i+preds.shape[1])%len(colors)], linewidth=lw)
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel('False Positive Rate', fontsize=1.8*size)
ax.set_ylabel('True Positive Rate', fontsize=1.8*size)
ax.set_title('Receiver operating characteristic Curve', fontsize=1.8*size, y=1.01)
ax.legend(loc=0, fontsize=1.5*size)
ax.xaxis.set_tick_params(labelsize=1.6*size, size=size/2, width=0.2*size)
ax.yaxis.set_tick_params(labelsize=1.6*size, size=size/2, width=0.2*size)
if path != None:
fig.savefig(path)
# plt.close(fig)
print('saved')
def plot_prc(preds, label, image_classes, size=20, path=None):
colors = ['pink','c','deeppink', 'b', 'g', 'm', 'y', 'r', 'k']
fig = plt.figure(figsize=(1.2*size,size))
ax = plt.axes()
for i in range(preds.shape[1]):
rp = (label[:,i]>0).sum()/len(label)
precision, recall, _ = precision_recall_curve(label[:,i].ravel(), preds[:,i].ravel())
lw=0.2*size
ax.plot(recall, precision,
label='PR-curve of {}'.format(image_classes[i])+ '( area = {0:0.3f})'
''.format(auc(recall, precision)),
color=colors[(i+preds.shape[1])%len(colors)], linewidth=lw)
ax.plot([0, 1], [rp, rp], 'k--', color=colors[(i+preds.shape[1])%len(colors)], lw=lw, label='random')
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.05])
ax.set_xlabel('Recall', fontsize=1.8*size)
ax.set_ylabel('Precision', fontsize=1.8*size)
ax.set_title('Precision-Recall curve', fontsize=1.8*size, y=1.01)
ax.legend(loc="lower left", bbox_to_anchor=(0.01, 0.1), fontsize=1.5*size)
ax.xaxis.set_tick_params(labelsize=1.6*size, size=size/2, width=0.2*size)
ax.yaxis.set_tick_params(labelsize=1.6*size, size=size/2, width=0.2*size)
if path != None:
fig.savefig(path)
# plt.close(fig)
print('saved')
def auc_roc(preds, label):
aucroc = []
for i in range(preds.shape[1]):
fpr, tpr, _ = roc_curve(label[:,i].ravel(), preds[:,i].ravel())
aucroc.append(auc(fpr, tpr))
return aucroc
def auc_prc(preds, label):
aucprc = []
for i in range(preds.shape[1]):
precision, recall, _ = precision_recall_curve(label[:,i].ravel(), preds[:,i].ravel())
aucprc.append(auc(recall, precision))
return aucprc
criterion = nn.MultiLabelSoftMarginLoss(size_average=False)
model = torch.load(MODEL_DIR, map_location=lambda storage, loc: storage)
if use_gpu:
model = model.module.cuda()
# take 10s running on single GPU
try:
acc, pred, prediction, target = dev_model(model.module, criterion, phase='test', gpu_id=None)
except:
acc, pred, prediction, target = dev_model(model, criterion, phase='test', gpu_id=None)
label = target.numpy()
preds = pred.numpy()
output = {}
for i in range(3):
fpr, tpr, _ = roc_curve(label[:,i].ravel(), preds[:,i].ravel())
precision, recall, _ = precision_recall_curve(label[:,i].ravel(), preds[:,i].ravel())
output['{} fpr'.format(image_classes[i])] = fpr
output['{} tpr'.format(image_classes[i])] = tpr
output['{} precision'.format(image_classes[i])] = precision
output['{} recall'.format(image_classes[i])] = recall
outcsv = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in output.items() ]))
outcsv.to_csv(SAVE_DIR+'CNN_test_output.csv', index=False)
plot_roc(pred.numpy(), target.numpy(), image_classes, size=30)
plot_prc(pred.numpy(), target.numpy(), image_classes, size=30)
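# A small hedged addition: summarise the per-class AUC values with the helper functions defined above
print('AUC-ROC per class:', dict(zip(image_classes, auc_roc(pred.numpy(), target.numpy()))))
print('AUC-PRC per class:', dict(zip(image_classes, auc_prc(pred.numpy(), target.numpy()))))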
###Output
_____no_output_____ |
pytorch/170912-tutorial.ipynb | ###Markdown
CIFAR-10
###Code
import torch
import torchvision
import torchvision.transforms as transforms
transform = transforms.Compose(
[transforms.ToTensor(),
# [0, 1] => [-1, 1]
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
)
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
def imshow(img):
img = img / 2 + 0.5
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
dataiter = iter(trainloader)
images, labels = dataiter.next()
print(images.size(), labels.size())
imshow(torchvision.utils.make_grid(images))
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
# Define a CNN
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2) # a layer with no learnable parameters can be defined once and reused
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
        x = self.fc3(x) # no softmax here; CrossEntropyLoss applies log-softmax internally
return x
net = Net()
net
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
for epoch in range(2):
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
        inputs, labels = data # minibatch
        # wrap the tensors in Variable (required by this older PyTorch API)
        inputs, labels = Variable(inputs), Variable(labels)
        # reset the gradients accumulated from the previous minibatch,
        # because the parameters are updated once per minibatch
        optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.data[0]
        if i % 2000 == 1999: # print the running loss every 2000 minibatches
print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
print('Finished Training')
dataiter = iter(testloader)
images, labels = dataiter.next()
imshow(torchvision.utils.make_grid(images))
print('Ground Truth:', ' '.join('%5s' % classes[labels[j]] for j in range(4)))
outputs = net(Variable(images))
outputs
_, predicted = torch.max(outputs.data, 1)
predicted[1].numpy()[0]
print('Predicted: ', ' '.join('%5s' % classes[predicted[j].numpy()[0]] for j in range(4)))
correct = 0
total = 0
for data in testloader:
images, labels = data
outputs = net(Variable(images))
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum()
print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
for data in testloader:
images, labels = data
outputs = net(Variable(images))
_, predicted = torch.max(outputs.data, 1)
c = (predicted == labels).squeeze()
for i in range(4):
label = labels[i]
class_correct[label] += c[i]
class_total[label] += 1
for i in range(10):
print('Accuracy of %5s : %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i]))
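# Hedged extra (not in the original tutorial): persist the trained weights so the network can be
# reloaded later without retraining; the file name is just an illustrative choice.
PATH = './cifar_net.pth'
torch.save(net.state_dict(), PATH)
# net.load_state_dict(torch.load(PATH))  # reload like this in a later session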
###Output
Accuracy of plane : 74 %
Accuracy of car : 65 %
Accuracy of bird : 42 %
Accuracy of cat : 12 %
Accuracy of deer : 43 %
Accuracy of dog : 70 %
Accuracy of frog : 70 %
Accuracy of horse : 47 %
Accuracy of ship : 46 %
Accuracy of truck : 61 %
|
J4A Notebook 3 - Widgets & Querying an API.ipynb | ###Markdown
Jupyter for Analysts - Notebook 3 Welcome to Jupyter for Analysts Notebook 3!! So by now you have queried nbgallery for notebooks, interacted with code in notebooks, found a notebook on nbgallery, and run at least 2 notebooks! This is seriously AMAZING progress and you should be incredibly proud. Being able to use Jupyter is a huge advantage for you! We promise, there will be notebooks out there that you will want to use for your work! :) This notebook is going to dive into more interaction with the code you are running. We will be querying ---service --- using 'widgets'. You have actually already seen a widget but probably didn't notice. That is perfectly OK! We are going to show you a couple more widgets you might see in notebooks. Let's start by looking at some basic widgets. Run the next few cells below to see widgets in action! Ok. First step, let's import the widgets into this notebook.
###Code
from IPython import display as disp
from IPython.display import display, HTML
from ipywidgets import widgets, Layout
better_layout = {'height' : '100px', 'width':'75%' }
###Output
_____no_output_____
###Markdown
Now start running the code below to play with widgets! The first one you will see is the string widget. Notice that there is a textbox for you to type in. Go ahead and type something (anything you want) in the box and watch the text JUST below the box change! :) Don't worry about the actual output here.
###Code
from ipywidgets import *
def f(x):
print(x)
interact(f, x="Type stuff here")
###Output
_____no_output_____
###Markdown
Thank u, next. Ok, run the code below to see a 'progress bar' widget in action. This particular progress bar is just counting up to a number, but in other notebooks this could be used to show you how much longer you have until your results are ready! Notice how it takes until the progress bar is done for the asterisk to turn into a number!
###Code
import time
progress_bar = widgets.FloatProgress(min=0.0, max=2.0)
display(progress_bar)
x = 0
while x != 2:
x+=.25
progress_bar.value = x
time.sleep(1)
print("Complete!")
###Output
_____no_output_____
###Markdown
As I'm writing this notebook, there is a big Oreo cookie discussion going on in the office. (How much cream is there in the regular, double, mega, and most stuffed? Is there a ratio?) Sorry - I got sidetracked. But now I am hungry for cookies! How many should I eat? Run the code below and let me know using the slider widget!
###Code
widgets.FloatSlider(min=0, max=10, step=0.5,description='Number of Cookies' )
###Output
_____no_output_____
###Markdown
Okay... that's fine, but what if you let me choose a range of the number of cookies I could eat? (Run the next cell!)
###Code
widgets.FloatRangeSlider(min=0, max=10, step=0.5, description='Cookie Range')
###Output
_____no_output_____
###Markdown
Do you think I *actually* ate within the range of cookies you gave me? Run the next cell, and check True or False. Note: You do not have to run this cell again once you click in the check box. Just run the cell after that to see my response!
###Code
f = widgets.Checkbox(description='False?:', value=False)
t = widgets.Checkbox(description='True?:', value=True)
display(f, t)
if f.value==True:
print("Are you sure?")
else:
print("Yeah.... probably :)")
###Output
_____no_output_____
###Markdown
Exercise 4: Who's in space right now? Querying an API Take a stab at running the code below, which is broken up into a few cells. This code goes to ---service--- and outputs the information you want (using widgets!) Okay... see how far you make it on your own. YOU CAN DO IT! Note: Not every cell will return output. Wait for the asterisk to go away, then run the next cell!
###Code
#imports a library and requests access to the Open Notify API from NASA
import requests
people = requests.get('http://api.open-notify.org/astros.json')
people = people.json()
#creates widgets to prompt user for information they want from the Open Notify API
name = widgets.Checkbox(description='Full Name', value=True)
space_craft = widgets.Checkbox(description='Space Craft', value=True)
total_people = widgets.Checkbox(description='Total Number of People in Space', value=True)
#displays the widgets
display(name, space_craft, total_people)
#checks to see if astronaut name was picked. if it was picked, then it tells you name of astronauts currently in space
if name.value==True:
for person in people['people']:
print("Astronaut:",person['name'])
#prints a blank line
print()
#checks to see if space craft name was picked. if it was picked, then it tells you name of
#space craft currently in space
if space_craft.value == True:
for person in people['people']:
print("Space Craft for", person['name'], ":", person['craft'])
#prints a blank line
print()
#checks to see if total number of people in space was picked. if it was picked, then it tells you the total number of
#astronauts currently in space
if total_people.value == True:
print("Total Number of People in Space:", people['number'])
#checks to see if no value was selected. if no value selected, an error message is printed
#to ask user to select a value
if total_people.value==False and space_craft.value==False and name.value==False:
print("Please check an item in the widget box above before running this code cell.")
###Output
_____no_output_____ |
scripts/notebooks/halo/GM_merger_tree_Check.ipynb | ###Markdown
Draw merger tree using GalaxyMaker + ConsistenTree2015. 12. 01 Functionally OK. Looks ugly: displacement dx should be more adaptive.
###Code
import tree.ctutils as ctu
def link_circle_up(x, y, r, ax, finish=0):
"""
Given two points, draw circle at the first point and link it to the second point
without drawing the second point by default (so that it can repeat to build a long thread of bids).
for the last point, pass the radius of the last circle to the argument 'finish'
For example,
fig = plt.figure()
ax = fig.add_subplot(111)
xpos = [1,1] & ypos = [2,4]
link_circle(xpos, ypos, 10, ax)
xpos = [1,2] & ypos = [4,6]
link_circle(xpos, ypos, 30, ax, finish=30)
fig.show()
"""
ax.plot(x[0], y[0], 'o', ms=r, lw=2, alpha=0.7, mfc='orange')
ax.plot(x, y, '-', c='black',alpha=0.7)
if finish > 0:
ax.plot(x[1], y[1], 'o', ms=20, lw=2, alpha=0.7, mfc='orange')
def get_xarr(n):
import numpy as np
arr=[]
a=0
for i in range(n):
a += (-1)**i * i
arr.append(a)
return np.asarray(arr)
def recursive_tree(idx, tt, nstep, ax, x0, y0, dx, mass_unit=1e10):
import tree.draw_merger_tree as dmt
prgs = ctu.get_progenitors(tt, idx)
i_this_gal = np.where(tt['id'] == idx)
m = np.sqrt(tt[i_this_gal]["mvir"] / mass_unit)
#print("IDX:", idx, "prgs: ",prgs, "mass:", m, i_this_gal)
nprg = len(prgs)
if nstep == 0:
return
else:
if nprg == 0:
return
else:
if nprg > 1:
#dx *= 1.1
dx += 0.5
# print("Branch!", nprg)
#xarr = get_xarr(nprg) * dx + x0
xarr = np.arange(nprg) * dx + x0
for i, x in zip(prgs, xarr):
link_circle_up([x0, x], [y0, y0 + 1], m, ax)
recursive_tree(i, tt, nstep - 1, ax, x, y0 + 1, dx, mass_unit=mass_unit)
from tree import treemodule
from tree import treeutils
import pickle
import numpy as np
alltrees = treemodule.CTree()
wdir = '/home/hoseung/Work/data/05427/'
#wdir = './'
is_gal = True
if is_gal:
# Galaxy tree
tree_path = 'GalaxyMaker/Trees/'
else:
# halo tree
tree_path = 'halo/Trees/'
load_extended_tree = True
if load_extended_tree:
try:
alltrees = pickle.load(open(wdir + tree_path + "extended_tree.pickle", "rb" ))
print("Loaded an extended tree")
except:
load_extended_tree = False
if not load_extended_tree:
"""
info file of each snapshot are required.
"""
alltrees = treemodule.CTree()
alltrees.load(filename= wdir + tree_path + 'tree_0_0_0.dat')
# Fix nout -----------------------------------------------------
nout_max = alltrees.data['nout'].max()
alltrees.data['nout'] += 187 - nout_max
print("------ NOUT fixed")
alltrees.data = ctu.augment_tree(alltrees.data, wdir, is_gal=is_gal)
print("------ tree data extended")
def extract_main_tree(treedata, idx=None, verbose=False):
"""
Returns a single branch/trunk of tree following only the main progenitors.
Works with both alltrees or atree.
Search until no progenitor is found. Doesn't matter how long the given tree is.
Only earlier snapshots are searched for.
"""
    if idx is None:
idx = treedata['id'][0]
if verbose:
print("No idx is given")
print("idx = ", idx)
nprg = 1
ind_list=[np.where(treedata['id'] == idx)[0][0]]
# main progenitor = mmp.
while nprg > 0:
idx = ctu.get_progenitors(treedata, idx, main=True)
# print(idx)
ind_list.append(np.where(treedata['id'] == idx[0])[0][0])
nprg = ctu.get_npr(treedata, idx[0])
return treedata[ind_list]
import matplotlib.pyplot as plt
nout_fi = 187
nout_ini = 30
i_final = np.where(alltrees.data["nout"] == nout_fi)
ttt_sub = alltrees.data[i_final]
nouts = np.arange(nout_fi - nout_ini + 1)
final_gals = ttt_sub['id']
final_gals_org = ttt_sub['Orig_halo_id']
plt.ioff()
#figure(figsize=[6,6])
#ax = fig.add_subplot(211)
#aexps = np.unique(alltrees.data["aexp"])[:len(nouts)]
aexps = np.unique(alltrees.data["aexp"])[:-len(nouts):-1]
zreds = ["%.2f" % (1/i -1) for i in aexps]
import os
if not os.path.isdir(wdir + "mergertrees/"):
os.mkdir(wdir + "mergertrees/")
for galid in final_gals:
#galid = 42216
#galid = 42207
plt.clf()
fig, ax = plt.subplots(1,2)
fig.set_size_inches([12,6])
sidgal = str(galid).zfill(5)
#print(zreds)
atree = ctu.extract_a_tree(alltrees.data, galid)
mtree = extract_main_tree(atree)
ax[0].scatter(atree['aexp'], np.log10(atree['m']), edgecolors='none', alpha=0.3)
ax[0].scatter(mtree['aexp'], np.log10(mtree['m']), edgecolors='none', alpha=0.6,
facecolors='red')
ax[0].set_xlim([0.15,1.1])
ax[0].set_xticks(aexps[0:151:20])
ax[0].set_xticklabels(zreds[0:151:20])
ax[0].set_title(galid)
recursive_tree(galid, atree, 150, ax[1], 0, 0, 0.8, mass_unit=2e8)
# y axis label (redshift)
ax[1].set_ylabel("Redshift")
#ax.set_xlim([-0.5,30])
ax[1].set_ylim([-5,155])
ax[1].set_yticks(range(0,151,10))
ax[1].set_yticklabels(zreds[0:151:10])
#plt.yticks(range(0,151,10), zreds[0:151:10])
ax[1].set_title(sidgal + ", " + str(atree[0]['Orig_halo_id']))
#fig.show()
plt.savefig(wdir + "mergertrees/" + sidgal + '.png')
#plt.close()
plt.close()
###Output
/usr/local/lib/python3.4/dist-packages/IPython/kernel/__main__.py:42: RuntimeWarning: divide by zero encountered in log10
/usr/local/lib/python3.4/dist-packages/IPython/kernel/__main__.py:43: RuntimeWarning: divide by zero encountered in log10
/usr/lib/python3/dist-packages/matplotlib/pyplot.py:412: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_num_figures`).
max_open_warning, RuntimeWarning)
|
Notebooks/LoadApplicationPoints/loadCaseExample.ipynb | ###Markdown
Tutorial: computing load application points for CPACSThis exercise gives an overview of how to read and write CPACS data using the TiXI library and how the parametric CPACS geometry can be processed using the TiGL API and Open Cascade. The following topics are addressed:- loading CPACS data using [**TiXI**](https://github.com/DLR-SC/tixi)- extracting the wing internal structure with [**TiGL API**](https://github.com/DLR-SC/tigl)- geometry operations using [**TiGL API**](https://github.com/DLR-SC/tigl) functions- geometry operations using [**pythonOCC**](https://github.com/tpaviot/pythonocc)- writing CPACS data using [**TiXI**](https://github.com/DLR-SC/tixi) 1. Load CPACS dataFirst we import TiXI 3 and [open](http://tixi.sourceforge.net/Doc/group__FileHandling.htmlga748c1c28c6d9ef0c80b9633ecc379672) the file `loadCaseExample.xml`:
###Code
from tixi3 import tixi3wrapper
# Instantiate TiXI
tixi_h = tixi3wrapper.Tixi3()
# Open the XML file
fname = 'input.xml'
error = tixi_h.open(fname)
if not error:
    print('CPACS data set %s opened successfully.'%fname)
###Output
CPACS data set input.xml opened successfully.
###Markdown
Let's begin with a [schema validation](http://tixi.sourceforge.net/Doc/group__Validation.htmlgacdd3338ad8d7c0a1b8bbd50ec465b93e) before we proceed:
###Code
xsd_file = 'cpacs_schema.xsd'
error = tixi_h.schemaValidateFromFile(xsd_file)
if not error:
print('Hooray, the data set is valid to %s and we don\'t have to scold the input data provider :)'%xsd_file)
###Output
Hooray, the data set is valid to cpacs_schema.xsd and we don't have to scold the input data provider :)
###Markdown
[TiXI](https://github.com/DLR-SC/tixi) provides [online documentation](http://tixi.sourceforge.net/Doc/index.html) of the available C functions and a [Wiki](https://github.com/DLR-SC/tixi/wiki) with some examples and further explanations. In Python it is convenient to use the [`help()`](https://docs.python.org/3/library/functions.htmlhelp) command to directly get an overview of the implemented functions of the wrapped API.
###Code
help(tixi_h)
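# A hedged example: once an element path is known, values can be read directly with the functions
# listed below; the XPath used here assumes a standard CPACS header and is not taken from the file.
xpath = '/cpacs/header/name'
if tixi_h.checkElement(xpath):
    print('CPACS header name:', tixi_h.getTextElement(xpath))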
###Output
Help on Tixi3 in module tixi3.tixi3wrapper object:
class Tixi3(builtins.object)
| Methods defined here:
|
| __del__(self)
|
| __init__(self)
| Initialize self. See help(type(self)) for accurate signature.
|
| addBooleanElement(self, parentPath, elementName, boolean)
|
| addBooleanElementNS(self, parentPath, qualifiedName, namespaceURI, boolean)
|
| addCpacsHeader(self, name, creator, version, description, cpacsVersion)
|
| addDoubleAttribute(self, elementPath, attributeName, number, format)
|
| addDoubleElement(self, parentPath, elementName, number, format)
|
| addDoubleElementNS(self, parentPath, qualifiedName, namespaceURI, number, format)
|
| addDoubleListWithAttributes(self, parentPath, listName, childName, childAttributeName, values, format, attributes, nValues)
|
| addExternalLink(self, parentPath, url, fileFormat)
|
| addFloatVector(self, parentPath, elementName, vector, numElements, format)
|
| addHeader(self, toolName, version, authorName)
|
| addIntegerAttribute(self, elementPath, attributeName, number, format)
|
| addIntegerElement(self, parentPath, elementName, number, format)
|
| addIntegerElementNS(self, parentPath, qualifiedName, namespaceURI, number, format)
|
| addPoint(self, pointParentPath, x, y, z, format)
|
| addTextAttribute(self, elementPath, attributeName, attributeValue)
|
| addTextElement(self, parentPath, elementName, text)
|
| addTextElementAtIndex(self, parentPath, elementName, text, index)
|
| addTextElementNS(self, parentPath, qualifiedName, namespaceURI, text)
|
| addTextElementNSAtIndex(self, parentPath, qualifiedName, namespaceURI, text, index)
|
| checkAttribute(self, elementPath, attributeName)
| boolean return values from special return code is coded manually here
|
| checkDocumentHandle(self)
|
| checkElement(self, elementPath)
| boolean return values from special return code is coded manually here
|
| cleanup(self)
|
| close(self)
|
| closeAllDocuments(self)
|
| create(self, rootElementName)
|
| createElement(self, parentPath, elementName)
|
| createElementAtIndex(self, parentPath, elementName, index)
|
| createElementNS(self, parentPath, qualifiedName, namespaceURI)
|
| createElementNSAtIndex(self, parentPath, qualifiedName, index, namespaceURI)
|
| dTDValidate(self, DTDFilename)
|
| declareNamespace(self, elementPath, namespaceURI, prefix)
|
| exportDocumentAsString(self)
|
| getArray(self, arrayPath, elementName, arraySize)
|
| getArrayDimensionNames(self, arrayPath, dimensionNames_len)
|
| getArrayDimensionSizes(self, arrayPath, sizes_len)
|
| getArrayDimensionValues(self, arrayPath, dimension, dimensionValues_len)
|
| getArrayDimensions(self, arrayPath)
|
| getArrayElementCount(self, arrayPath, elementName)
|
| getArrayElementNames(self, arrayPath, elementType)
|
| getArrayParameterNames(self, arrayPath, parameterNames_len)
|
| getArrayParameters(self, arrayPath)
|
| getArrayValue(self, array, dimSize, dimPos, dims)
|
| getAttributeName(self, elementPath, attrIndex)
|
| getBooleanAttribute(self, elementPath, attributeName)
|
| getBooleanElement(self, elementPath)
|
| getChildNodeName(self, parentElementPath, index)
|
| getDocumentPath(self)
|
| getDoubleAttribute(self, elementPath, attributeName)
|
| getDoubleElement(self, elementPath)
|
| getFloatVector(self, vectorPath, eNumber)
|
| getIntegerAttribute(self, elementPath, attributeName)
|
| getIntegerElement(self, elementPath)
|
| getNamedChildrenCount(self, elementPath, childName)
|
| getNodeType(self, nodePath)
|
| getNumberOfAttributes(self, elementPath)
|
| getNumberOfChilds(self, elementPath)
|
| getPoint(self, pointParentPath)
|
| getPrintMsgFunc(self)
|
| getTextAttribute(self, elementPath, attributeName)
|
| getTextElement(self, elementPath)
|
| getVectorSize(self, vectorPath)
|
| getVersion(self)
|
| open(self, xmlInputFilename, recursive=False)
|
| openDocument(self, xmlFilename)
|
| openDocumentRecursive(self, xmlFilename, oMode)
|
| openHttp(self, httpURL)
|
| openString(self, xmlImportString)
|
| registerNamespace(self, namespaceURI, prefix)
|
| registerNamespacesFromDocument(self)
|
| removeAttribute(self, elementPath, attributeName)
|
| removeElement(self, elementPath)
|
| removeExternalLinks(self)
|
| renameElement(self, parentPath, oldName, newName)
|
| save(self, fileName, recursive=False, remove=False)
| Save the main tixi document.
| If the document was opened recursively,
| * 'recursive' tells to save linked nodes to their respecitve files, too.
| * 'remove' tells to remove the links to external files after saving the complete CPACS inclusively all linked content to the main file.
| You cannot have 'remove' without 'recursive'.
|
| saveAndRemoveDocument(self, xmlFilename)
|
| saveCompleteDocument(self, xmlFilename)
|
| saveDocument(self, xmlFilename)
|
| schemaValidateFromFile(self, xsdFilename)
|
| schemaValidateFromString(self, xsdString)
|
| schemaValidateWithDefaultsFromFile(self, xsdFilename)
|
| setCacheEnabled(self, enabled)
|
| setElementNamespace(self, elementPath, namespaceURI, prefix)
|
| swapElements(self, element1Path, element2Path)
|
| uIDCheckDuplicates(self)
|
| uIDCheckExists(self, uID)
|
| uIDCheckLinks(self)
|
| uIDGetXPath(self, uID)
|
| uIDSetToXPath(self, xPath, uID)
|
| updateBooleanElement(self, elementPath, boolean)
|
| updateDoubleElement(self, elementPath, number, format)
|
| updateFloatVector(self, path, vector, numElements, format)
|
| updateIntegerElement(self, elementPath, number, format)
|
| updateTextElement(self, elementPath, text)
|
| usePrettyPrint(self, usePrettyPrint)
|
| xPathEvaluateNodeNumber(self, xPathExpression)
|
| xPathExpressionGetTextByIndex(self, xPathExpression, elementNumber)
|
| xPathExpressionGetXPath(self, xPathExpression, index)
|
| xSLTransformationToFile(self, xslFilename, resultFilename)
|
| xSLTransformationToString(self, xslFilename)
|
| ----------------------------------------------------------------------
| Data descriptors defined here:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| lib = <CDLL 'libtixi3.so', handle 7fffcd164c00>
###Markdown
We will now read the required information from the CPACS data set. Since we already know that the data set is valid, we will find all information about the expected data in the [CPACS documentation](https://cpacs.de/pages/documentation.html) (he different ways to read and interpret a schema are explained in [tutorials from this workshop](https://github.com/DLR-SL/CPACS_Seminar/tree/master/HowTos)). The structure of the [`loadApplicationPointSets`](https://www.cpacs.de/documentation/CPACS_loadCases/html/75379068-a51b-aa5b-81fa-b0d3d4e41543.htm) can be represented as the following XSD diagram:First we want to check whether the optional [`loadReferenceLine`](https://www.cpacs.de/documentation/CPACS_loadCases/html/7a985b67-4f8a-bd38-f9bf-e2e20606e591.htm) exists and import the coordinates of its nodes.*Note: For the sake of simplicity, we make the assumptions that we expect our data in the first `loadApplicationPointSet` and that we can only process relative coordinates of the `loadReferenceLine`. Detailed error information (e.g., `else` conditions), as it should be considered in a tool implementation, is neglected as well.*
###Code
# Look for the first point set
xpath = '/cpacs/vehicles/aircraft/model[1]/analyses/global/loadApplicationPointSets/loadApplicationPointSet[1]'
if tixi_h.checkElement(xpath):
# Extract the component uID which points to the corresponding component segment
componentUID = tixi_h.getTextElement(xpath+'/componentUID')
# Check whether a reference line is given
xpath += '/loadReferenceLine'
if tixi_h.checkElement(xpath):
point_list = []
# Read the point list
for i in range(tixi_h.getNumberOfChilds(xpath)):
# If <eta> is given, then relative coordinates can be expected
point_xpath = xpath+'/loadReferencePoint[%i]'%(i+1)
if tixi_h.checkElement(point_xpath+'/eta'):
# Obligatory elements
eta = tixi_h.getDoubleElement(point_xpath+'/eta')
xsi = tixi_h.getDoubleElement(point_xpath+'/xsi')
referenceUID = tixi_h.getTextElement(point_xpath+'/referenceUID')
# <relHeight> is optional, so we set False to indicate that the value is not given
if tixi_h.checkElement(point_xpath+'/relHeight'):
relHeight = tixi_h.getDoubleElement(point_xpath+'/relHeight')
else:
relHeight = False
point_list.append([eta,xsi,relHeight,referenceUID])
print('Reference axis points:',*point_list, sep = "\n")
###Output
Reference axis points:
[0.0, 0.3, False, 'D150_iLOADS_W1_CompSeg1']
[0.12, 0.3, 0.5, 'D150_iLOADS_W1_CompSeg1']
[1.0, 0.4, 0.5, 'D150_iLOADS_W1_CompSeg1']
###Markdown
 We have now extracted a list of points defining a reference line. In the next step we want to compute the intersection of this reference line with the ribs to specify proper load application points for structural analysis. 2. Intersection of reference line with ribs. We will use the TiGL API for the basic geometry handling. The example will furthermore illustrate how to use Open Cascade for individual geometry operations in case they are not implemented in TiGL. 2.1 Using TiGL to extract the geometry of wing, ribs and spars. Let's import `tigl3` and create an instance of the TiGL3 class.
###Code
from tigl3 import tigl3wrapper
# Create instance of TiGL
tigl_h = tigl3wrapper.Tigl3()
tigl_h.open(tixi_h, '')
###Output
_____no_output_____
###Markdown
Load the configuration manager from `tigl3.configuration`:
###Code
import tigl3.configuration
# Load TiGL configuration manager and uID manager
mgr = tigl3.configuration.CCPACSConfigurationManager_get_instance()
aircraft_config = mgr.get_configuration(tigl_h._handle.value)
uid_mgr = aircraft_config.get_uidmanager()
###Output
_____no_output_____
###Markdown
Get the wing by its `uID`, the component segment by its index and retrieve the corresponding internal structure:
###Code
wing = uid_mgr.get_geometric_component('D150_iLOADS_W1')
component_segment = wing.get_component_segment(1)
wing_structure = component_segment.get_structure()
###Output
_____no_output_____
###Markdown
Now we can extract spars and ribs:
###Code
# List of spars
spars = []
for i in range(wing_structure.get_spar_segment_count()):
spars.append(wing_structure.get_spar_segment(i+1))
# List of rib sets and rib faces
rib_sets = []
rib_faces = []
for i in range(wing_structure.get_ribs_definition_count()):
print('reading rib set #%i ...'%(i+1))
rib_set = wing_structure.get_ribs_definition(i+1)
rib_sets.append(rib_set)
for j in range(rib_set.get_number_of_ribs()):
rib_faces.append(rib_set.get_rib_face(j+1))
print("\nDone with reading %i rib faces from %i rib sets!"%(len(rib_faces),i))
###Output
reading rib set #1 ...
reading rib set #2 ...
reading rib set #3 ...
reading rib set #4 ...
reading rib set #5 ...
reading rib set #6 ...
reading rib set #7 ...
    Done with reading 31 rib faces from 7 rib sets!
###Markdown
Let's plot the result using the Open Cascade viewer. If you want to enter the event loop, i.e. using the mouse to modify the view, uncomment `start_display()`:
###Code
from OCC.Display.SimpleGui import init_display
display, start_display, add_menu, add_function_to_menu = init_display()
display.DisplayShape(wing.get_lower_shape(), transparency=0.5, update=True)
display.DisplayShape(wing.get_upper_shape(), transparency=0.5, update=True)
for spar in spars:
display.DisplayShape(spar.get_spar_geometry(), color="blue", update=True)
for i, rib_set in enumerate(rib_sets):
display.DisplayShape(rib_set.get_ribs_geometry(), color="blue", update=True)
# uncomment to enter the event loop
# start_display()
###Output
INFO:OCC.Display.backend:backend loaded: qt-pyqt5
INFO:OCC.Display.SimpleGui:GUI backend set to: qt-pyqt5
Layer manager created
Layer dimensions: 1024, 768
Many colors for color name blue, using first.
Many colors for color name blue, using first.
Many colors for color name blue, using first.
Many colors for color name blue, using first.
Many colors for color name blue, using first.
Many colors for color name blue, using first.
Many colors for color name blue, using first.
Many colors for color name blue, using first.
Many colors for color name blue, using first.
###Markdown
The result should look like this: 2.2 Converting relative component segment coordinates to absolute coordinates using the TiGL API There is no direct TiGL method to translate the relative component segment coordinates to absolute coordinates. But we can use the TiGL API to write our own function:
###Code
    import numpy as np  # needed for the vector arithmetic below
    from OCC.gp import gp_Pnt
def get_abs_pnt(eta, xsi, relHeight, compUID):
# Get uIDs of the corresponding wing and segment
wing_uid, segm_uid = tigl_h.wingComponentSegmentPointGetSegmentEtaXsi(compUID,eta,xsi)[0:2]
# Get the wing and segment index from its uID
wing_index = tigl_h.wingGetIndex(wing_uid)
segm_index = tigl_h.wingGetSegmentIndex(segm_uid)[0]
if not relHeight:
# TiGL returns absolute point coordinates on the wing chord face at eta, xsi of the segment
pnt = tigl_h.wingGetChordPoint(wing_index,segm_index,eta,xsi)
else:
# Compute the unit normal vector to the chord face
chord_normal = np.array(tigl_h.wingGetChordNormal(wing_index, segm_index, eta, xsi))
e = chord_normal/np.linalg.norm(chord_normal)
# Get the upper and lower intersection with the wing surface
p_up = np.array(tigl_h.wingGetUpperPointAtDirection(wing_index, segm_index, eta, xsi, e[0], e[1], e[2])[0:3])
p_lo = np.array(tigl_h.wingGetLowerPointAtDirection(wing_index, segm_index, eta, xsi, e[0], e[1], e[2])[0:3])
# Translate the relHeight parameter into point coordinates
dist = np.linalg.norm(p_up-p_lo)
pnt = p_lo + relHeight*dist*e
# Return the result as gp_Pnt
return gp_Pnt(*pnt)
###Output
_____no_output_____
###Markdown
We call the above function for each point in our point list and write the results into a new list `abs_points`:
###Code
comp_uid = component_segment.get_uid()
abs_points = []
for point in point_list:
abs_points.append(get_abs_pnt(*point))
###Output
_____no_output_____
###Markdown
Using the `BRepBuilderAPI_MakeEdge` class we construct edges between the nodes:
###Code
from OCC.BRepBuilderAPI import BRepBuilderAPI_MakeEdge
edges = []
for i in range(len(abs_points)-1):
edges.append(BRepBuilderAPI_MakeEdge(abs_points[i],abs_points[i+1]))
###Output
_____no_output_____
###Markdown
Let's plot the `loadReferenceLine` composed of the three point coordinates and the two edges with green color:
###Code
for pnt in abs_points:
display.DisplayShape(pnt, color="green", update=True)
for edge in edges:
display.DisplayShape(edge.Edge(), color="green", update=True)
# uncomment to enter the event loop
# start_display()
###Output
_____no_output_____
###Markdown
 2.3 Intersection of ribs and reference line using pythonOCCWe now have a shape for each of the ribs and the reference line. Next, we will write a routine to combine the edges to a curve and intersect this curve with the ribs. For this we will use the pythonOCC library directly. *Note: Threre is a [API documentation](https://cdn.rawgit.com/tpaviot/pythonocc-core/804f7f3/doc/apidoc/0.18.1/) of pythonOCC. Furthermore, it is recommended to have a look at the [Python demos](https://github.com/tpaviot/pythonocc-demos) or the [C++ documentation](https://www.opencascade.com/doc/occt-6.9.1/refman/html/index.html) from which the Python functions are derived. Again, the `help()` command is very useful to get an overview of the possible member functions of a certain class.*The pythonOCC library usually offers several ways to get to the desired solution. In our case, we first combine the edges to a wire with [BRepBuilderAPI_MakeWire](https://cdn.rawgit.com/tpaviot/pythonocc-core/804f7f3/doc/apidoc/0.18.1/OCC.BRepBuilderAPI.html?highlight=brepbuilderapi_makewireOCC.BRepBuilderAPI.BRepBuilderAPI_MakeWire):
###Code
from OCC.BRepBuilderAPI import BRepBuilderAPI_MakeWire
wire_h = BRepBuilderAPI_MakeWire(edges[0].Edge(), edges[1].Edge())
wire = wire_h.Wire()
###Output
_____no_output_____
###Markdown
From this wire we derive the curve in form of a spline with C0 continuity using [Approx_Curve3d](https://cdn.rawgit.com/tpaviot/pythonocc-core/804f7f3/doc/apidoc/0.18.1/OCC.Approx.html?highlight=approx_curve3dOCC.Approx.Approx_Curve3d):
###Code
from OCC.BRepAdaptor import BRepAdaptor_CompCurve, BRepAdaptor_HCompCurve
from OCC.GeomAbs import GeomAbs_C0
from OCC.Approx import Approx_Curve3d
wireAdaptor = BRepAdaptor_CompCurve(wire)
curveAdaptor = BRepAdaptor_HCompCurve(wireAdaptor)
    approx = Approx_Curve3d(curveAdaptor.GetHandle(), 1e-7, GeomAbs_C0, 5, 12)  # tolerance, continuity, max. segments, max. degree
curve = approx.Curve()
###Output
_____no_output_____
###Markdown
In the next step we use the [`GeomAPI_IntCS`](https://cdn.rawgit.com/tpaviot/pythonocc-core/804f7f3/doc/apidoc/0.18.1/OCC.GeomAPI.html?highlight=geomapi_intcsOCC.GeomAPI.GeomAPI_IntCS) class to intersect the curve with the rib surfaces. Therefore, the rib faces are converted to surfaces via [`BRep_Tool`](https://cdn.rawgit.com/tpaviot/pythonocc-core/804f7f3/doc/apidoc/0.18.1/OCC.BRep.html?highlight=brep_toolOCC.BRep.BRep_Tool):
###Code
from OCC.GeomAPI import GeomAPI_IntCS
from OCC.BRep import BRep_Tool
intersector = GeomAPI_IntCS()
intersec_pnts = []
for rib_face in rib_faces:
face = BRep_Tool.Surface(rib_face)
intersector.Perform(curve, face)
for i in range(intersector.NbPoints()):
intersec_pnts.append(intersector.Point(i+1))
###Output
_____no_output_____
###Markdown
Let's plot the results as red points:
###Code
for pt in intersec_pnts:
display.DisplayShape(pt, color="red", update=True)
# uncomment to enter the event loop
start_display()
###Output
_____no_output_____
###Markdown
It should look like this:  3. Write load application points to CPACS We have now intersected our reference line with the internal wing structure and thus determined the load application points. Now let's write the results back to CPACS.A look into the [online documentation](https://www.cpacs.de/documentation/CPACS_loadCases/html/378cea43-6e5e-7f71-d037-9cc342ad0a05.htm) reveals the following data structure for this:```XML 1;2;... ..;.. ..;.. ..;.. ``` We could see from the documentation that the [coordinates](https://www.cpacs.de/documentation/CPACS_loadCases/html/8cbb5e0f-58aa-65db-8086-40d31330082d.htm) of the [load application points](https://www.cpacs.de/documentation/CPACS_loadCases/html/bf3688ab-60ba-701f-51d4-a76b2ab062ff.htm) are stored as [stringVectorBaseType](https://www.cpacs.de/documentation/CPACS_loadCases/html/32aea7db-266a-6dfa-5d16-f9b63c1e62a8.htm). Therefore we first create corresponding lists in Python:(*Note: documentation links must be adopted to the new proposal once released*)
###Code
id_vec, x_vec, y_vec, z_vec = [],[],[],[]
for i, pnt in enumerate(intersec_pnts):
id_vec.append(i+1)
x,y,z = pnt.Coord()
x_vec.append(x)
y_vec.append(y)
z_vec.append(z)
###Output
_____no_output_____
###Markdown
Using TiXI we [create](http://tixi.sourceforge.net/Doc/group__Elements.htmlga48de468f8e6b82bafff8465bed229068) the `loadApplicationPoints` element and [add the corresponding vectors](http://tixi.sourceforge.net/Doc/group__Elements.htmlgab3d822acc72ee8e15b5c43140db1de53):
###Code
    # Create a child element 'loadApplicationPoints'
parentPath = '/cpacs/vehicles/aircraft/model[1]/analyses/global/loadApplicationPointSets/loadApplicationPointSet[1]'
tixi_h.createElement(parentPath, 'loadApplicationPoints')
# Add coordinate vectors
parentPath += '/loadApplicationPoints'
tixi_h.addFloatVector(parentPath, 'pointIDs', id_vec, len(id_vec), '%g')
tixi_h.addFloatVector(parentPath, 'x', x_vec, len(x_vec), '%.5f')
tixi_h.addFloatVector(parentPath, 'y', y_vec, len(y_vec), '%.5f')
tixi_h.addFloatVector(parentPath, 'z', z_vec, len(z_vec), '%.5f')
###Output
_____no_output_____
###Markdown
Finally the results are [written](http://tixi.sourceforge.net/Doc/group__FileHandling.htmlgaf1bedd335ae49ba7dc69836720b00372) to `output.xml`:
###Code
fname = 'output.xml'
error = tixi_h.saveDocument(fname)
if not error:
print("Data written successfully to %s."%fname)
###Output
Data written successfully to output.xml.
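###Markdown
 As an optional cross-check (not part of the original tutorial), the freshly written vectors could be read back with `getFloatVector`, which appears in the `help()` listing above; the sketch below simply reuses `fname` and `parentPath` from the previous cells.
###Code
    # sketch: re-open the written file and read the x coordinates back
    tixi_check = tixi3wrapper.Tixi3()
    if not tixi_check.open(fname):
        x_back = tixi_check.getFloatVector(parentPath + '/x', len(x_vec))
        print('x vector read back:', list(x_back)[:3], '...')
        tixi_check.close()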
|
Chapter 0 - Foundations of Python/Flow control and looping.ipynb | ###Markdown
 Flow control and looping. The *if...else* statement is found in most mainstream programming languages to control the flow of a program's execution. With the conditions you prescribe, your program can handle different circumstances with the corresponding treatment. The basic syntax is: if condition_expression: statement(s) elif condition_expression: statement(s) else: statement(s) Both elif and else are optional and multiple elif branches are allowed; Python also provides a short-hand (ternary) syntax: if condition_expression: statement or statement if condition_expression else statement. Remember that there are no curly brackets in Python syntax, so check the indentation if errors happen. Nested *if...else* is also allowed, that is, using another *if...else* as the statement executed when a particular condition_expression is true (a short example follows the next code cell). To express logical conditions, Python uses the following comparison operators: - Equals: a==b - Not equals: a!=b - Less than or equal to: a<=b - Less than: a<b - Greater than or equal to: a>=b - Greater than: a>b Logical operators let you build a complex condition_expression by combining several simple ones: - *and* : expression1 and expression2 - *or* : expression1 or expression2
###Code
math=4
english=3
if english < 4:
print('extra English class')
#output>>extra English class
# Any subject less than 4 should have extra class
#option 1
if english < 4 or math <4:
print('extra class')
#output>>extra class
#option 2
if english<4:
print('extra class')
elif math<4:
print('extra class')
#output>>extra class
print('extra class') if english<4 or math<4 else print('go playing your video game')
#output>>extra class
###Output
extra English class
extra class
extra class
extra class
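###Markdown
 As mentioned above, *if...else* statements can also be nested; here is a short added sketch where the inner test only runs when the outer condition holds:
###Code
    # nested if...else (added example, reusing math and english from above)
    if english < 4:
        if math < 4:
            print('extra class for both subjects')
        else:
            print('extra English class only')
    else:
        print('no extra English class')
    #output>>extra English class only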
###Markdown
 for and while Loop. for Loop: the for loop iterates over an iterable object, much like the traversal loops we usually write in C/C++. The basic syntax is: for val in sequence: loop body else: statement for the else part. The *else* part is optional and rarely used. *sequence* can be any iterable object such as a list or a string, or a range() object. You can generate the numbers from 0 to 9 by using range(10) or range(0,10,1) (start=0, stop=10, step=1).
###Code
# The following three loops come out the same result: print numbers from 0 to 9 sequentially
re=[]
for i in range(10):
re.append(i)
print(re)
re.clear()
for i in range(0,10,1):
re.append(i)
print(re)
a=list(range(10))
print(a)
re.clear()
for i in a:
re.append(i)
print(re)
###Output
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
###Markdown
 while Loop. The while loop repeats a section of code as long as the test condition remains True. For cases where you do not know the exact number of iterations in advance, a while loop might be a good choice. The basic syntax is: while testing_condition: statement(s) for the while loop else: statement for the else part. The condition is checked first; any non-zero value is considered True, while None and 0 are interpreted as False. As with the for loop, the *else* part is optional and executes once when the testing condition becomes False.
###Code
re=[]
n=10;i=1
while i<n:
re.append(i)
i=i+1 #(can also be expressed by i+=1)
print(re)
###Output
[1, 2, 3, 4, 5, 6, 7, 8, 9]
###Markdown
 break, continue, and pass. *break* can be applied to both for and while loops: you put it in the loop body (often inside an if statement) and it redirects your program to just outside the loop. In contrast to *break*, *continue* keeps your program in the loop, skipping the rest of the current iteration and moving on to the next one. Finally, if you need a statement that does nothing at all, use *pass*.
###Code
re=[]
for i in range(10):
re.append(i)
if i==6:
print("i becomes",i," now, let's take a break!")
break
elif i==3:
print('i has been 3')
pass
else:
continue
print(re)
###Output
i has been 3
i becomes 6 now, let's take a break!
[0, 1, 2, 3, 4, 5, 6]
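###Markdown
 *break* works the same way inside a while loop; a small added example that stops an otherwise endless loop:
###Code
    i = 0
    while True:
        i += 1
        if i == 5:
            break
    print('stopped at', i)
    #output>>stopped at 5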
###Markdown
 Iterating through tuple, list, string, and dictionary. *enumerate()* is a very useful function to introduce in this part: it returns an enumerate object that yields the index and value of each item as a tuple.
###Code
S='abcd'
T=('a','b','c','d')
L=['a','b','c','d']
D=dict(zip(range(4),L))
    # the following example applies to string, list, and tuple
    # all the outputs are the same
res=[]
#Case 1
for i in S:
res.append(i)
print('Result of Case 1: ', res)
res.clear()
#Case 2
for i in enumerate(S):
res.append(i)
print('Result of Case 2: ', res)
res.clear()
# for dictionary
#Case 4
for i in D:
res.append((i,D[i]))
print('Result of Case 4: ',res)
res.clear()
#Case 5
    for i in enumerate(D): # be careful! enumerate() pairs a running index with the keys, not the values
res.append(i)
print('Result of Case 5: ',res)
res.clear()
#Case 6
for i in D.items():
res.append(i)
print('Result of Case 6: ',res)
###Output
Result of Case 1: ['a', 'b', 'c', 'd']
Result of Case 2: [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'd')]
Result of Case 4: [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'd')]
Result of Case 5: [(0, 0), (1, 1), (2, 2), (3, 3)]
Result of Case 6: [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'd')]
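###Markdown
 One more added note: the (index, value) pairs from *enumerate()* and the (key, value) pairs from *items()* are usually unpacked directly in the for statement, and *enumerate()* accepts an optional start index:
###Code
    for idx, ch in enumerate(S, start=1):
        print(idx, ch)
    #output>>1 a ... 4 d
    for key, value in D.items():
        print(key, value)
    #output>>0 a ... 3 d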
|
DAY 401 ~ 500/DAY440_[BaekJoon] ๋์ถฉ ๋ํด (Python).ipynb | ###Markdown
 August 1, 2021 (Sunday)  BaekJoon - "Add Roughly" (Python)  Problem: https://www.acmicpc.net/problem/8949  Blog: https://somjang.tistory.com/entry/BaekJoon-8949%EB%B2%88-%EB%8C%80%EC%B6%A9-%EB%8D%94%ED%95%B4-Python  Solution
###Code
def daechung_sum(num1, num2):
result = []
num1_len, num2_len = len(num1), len(num2)
if num1_len > num2_len:
num2 = '0' * (num1_len - num2_len) + num2
else:
num1 = '0' * (num2_len - num1_len) + num1
max_len = max([num1_len, num2_len])
for i in range(max_len):
result.append(str(int(num1[i]) + int(num2[i])))
return "".join(result)
if __name__ == "__main__":
num1, num2 = input().split()
print(daechung_sum(num1, num2))
###Output
_____no_output_____ |
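###Markdown
 A quick added sanity check of the helper above: each pair of digits is summed independently and the partial sums are concatenated, so no carry is ever propagated.
###Code
    print(daechung_sum("456", "7890"))
    # digit sums 0+7, 4+8, 5+9, 6+0  ->  "7"+"12"+"14"+"6"  ->  712146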
work/jupyter/new_feature.ipynb | ###Markdown
 Assignment expressions (PEP 572): the walrus operator := - Overview of Python 3.8, part 1: Assignment expressions - https://atsuoishimoto.hatenablog.com/entry/2019/09/03/110508 - Traditionally, assignment in Python was only a statement: a = 100 - With the walrus operator, an assignment can be embedded in an expression: a = (b := 50) + 50 - awesome
###Code
a = (b := 50) + 50
print(a)
###Output
100
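###Markdown
 A small additional sketch (not in the original notes): the walrus operator is especially handy when the assigned value is also the loop condition.
###Code
    values = [3, 1, 4, 1, 5, 9, 2, 6]
    it = iter(values)
    total = 0
    while (v := next(it, None)) is not None and v < 9:
        total += v
    print(total)   # 3+1+4+1+5 = 14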
###Markdown
 Positional-only parameters (PEP 570): positional-only function arguments - Overview of Python 3.8, part 2 - https://atsuoishimoto.hatenablog.com/entry/2019/09/06/115651 - Since Python 3.0, keyword-only arguments can be declared with "*" -> 1 - Python 3.8 adds positional-only arguments declared with "/" -> 2
###Code
    # 1: keyword-only arguments (possible since Python 3.0) - everything after "*" must be passed by keyword
    def func(a, b, *, c=1, d=2):
        return a + b + c + d
    func(1,2)
    func(1,2,10)        # raises TypeError: c can only be passed as a keyword argument
    # 2: positional-only parameters (new in Python 3.8) - everything before "/" cannot be passed by keyword
    def func1(a, b, /, *, c, d):
        return a + b + c + d
    func1(1,2,10,12)    # raises TypeError: c and d must be passed as keyword arguments
    func1(1, b=2)       # raises TypeError: b is positional-only
###Output
_____no_output_____
###Markdown
 Parallel filesystem cache for compiled bytecode files - (brief mention only)  f-strings support = for self-documenting expressions and debugging - f-strings now accept "=" - I have not looked into it much, but this seems to be the only f-string-related change - the result of the expression is shown as well
###Code
from datetime import date
user = "eric_idle"
member_since = date(1975, 7,31)
f"{user=} {member_since=}"
memb_days = date.today() - member_since
f"{user=} {memb_days=}"
###Output
_____no_output_____
###Markdown
 Pickle protocol bumped to 5
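###Markdown
 A minimal added sketch of pickle protocol 5 (PEP 574), which is new in Python 3.8 and adds support for out-of-band data buffers:
###Code
    import pickle
    payload = {"big": bytearray(10**6)}
    blob = pickle.dumps(payload, protocol=5)   # protocol 5 requires Python >= 3.8
    restored = pickle.loads(blob)
    print(len(restored["big"]))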
###Code
    a = [1,2,3,4,5]
    for i in reversed(a):
        print(i)
    # Python 3.8: dicts and dict views are now iterable in reversed insertion order
    dict_a = {1: "test", 2: "test2", 3: "test3"}
    for i in reversed(dict_a):
        print(dict_a[i])
    # Python 3.8 warns about the missing comma here (SyntaxWarning: ... perhaps you missed a comma?)
    [(10, 20) (30, 40)]
    # dict comprehensions now evaluate the key before the value, matching dict literals like this one,
    # so "role? " is prompted before "actor? "
    cast = {input("role? "): input("actor? ")}
    cast
    type(True)
###Output
_____no_output_____ |
2_2_qiskit-quantum-state-classifier_circuits_and_computations.ipynb | ###Markdown
 Classification of quantum states with high dimensional entanglement - Circuits and computations. Version compatible with the 1st and 2nd pilot studies.
###Code
import numpy as np
import copy
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, Aer, execute, transpile, assemble
from qiskit.tools.visualization import *
from qiskit.ignis.mitigation.measurement import (complete_meas_cal, tensored_meas_cal,
CompleteMeasFitter, TensoredMeasFitter)
import json
from scipy.signal import savgol_filter
import time
from qiskit.tools.monitor import job_monitor
from o_utils import ora # classifier utilities
from o_plot import opl # utilities for result plot
from c_utils import new_cut # circuit building utilities
def json_dic_loader(dic_name):
f = open(data_directory+dic_name+'.json')
return json.load(f)
###Output
_____no_output_____
###Markdown
 markdown for safety on demo: def json_dic_dumper(dic, dic_name): with open(data_directory+dic_name+'.json', 'w') as f: json.dump(dic,f)
###Code
# common code for calling the classifier for ideal device and for real devices
def add_single_dic(target_data_list):
start_time = time.time()
print("started",time.strftime('%d/%m/%Y %H:%M:%S'),mitig_name,
"mitigation",mit_str,o_metric,model_name)
# added for D,S,M choice. Mainstream : mixed set of 20 states
first = 0
last = nb_states
if unique_char == "D":
last = int(nb_states/2)
elif unique_char == "S":
first = int(nb_states/2)
# get the classifier error curve in function of the number of shot and the "safe shot number"
error_curve, safe_rate, ernb = ora.provide_error_curve(PD_model=model_dic[model_name][first:last,:],
PD_test=PD_test[first:last,:],
trials=trials,
window=window,
epsilon=epsilon,
max_shots=max_shots,
pol=pol,
verbosality=verbosality)
tail = savgol_filter(ernb, window, pol, axis=0)
len_curve = len(error_curve)
safe_shot_nb = len_curve - int((window-1)/2) # OK
print('safe_shot_nb',safe_shot_nb, 'safe_rate',safe_rate, "nb trials:",trials)
err_rates = tail[int((window-1)/2),:]/trials
err_rate_max = np.max(err_rates)
err_rate_min = np.min(err_rates)
r=4
print("savgol interpolated error rate mean:", np.round(np.mean(err_rates),r),
"min:", np.round(err_rate_min,r),
"max:", np.round(err_rate_max,r), "for",
[ien for ien, jen in enumerate(err_rates) if jen == err_rate_max])
end_time = time.time()
#save the data in a list of dictionaries :
single_dic={"project":mitig_name,
"id_gates":id_gates,
"mitigation":mit_str,
"model":model_name,
"metric":o_metric, "device":project_device,
"curve_length":len_curve,
"shots": safe_shot_nb,
"shots_rate": safe_rate,
"error_curve":error_curve,
"trials":trials,"window":window,
"epsilon":epsilon,"SG_pol": pol,
"computation_time":end_time-start_time,
"time_completed":time.strftime('%d/%m/%Y %H:%M:%S'),
"trials":trials,
"QV": QV_dic[project_device],
"fidelity": fidelity_dic[project_device],
"error_nb":ernb}
target_data_list.append(single_dic)
print("completed",time.strftime('%d/%m/%Y %H:%M:%S'),mitig_name,
"mitigation",mit_str,o_metric,model_name,"\n")
###Output
_____no_output_____
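###Markdown
 An added micro-example of the Savitzky-Golay smoothing used in add_single_dic above: scipy's savgol_filter fits a low-order polynomial over a short sliding window, which is how the raw error counts are smoothed before the error rates are read off.
###Code
    # added illustration (not part of the study data): smooth a noisy error-count curve
    noisy_counts = np.array([9., 7., 8., 5., 4., 5., 2., 2., 1., 1., 0.])
    print(savgol_filter(noisy_counts, window_length=5, polyorder=2))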
###Markdown
Set up the simulator and layout for 5 qubits
###Code
simulator = Aer.get_backend('qasm_simulator')
#specify the layout of the devices
used_qubits = 5
qubit_list = [0,1,2,3,4]
#short_version = False
#program_name="QAD" # 1st pilot project GHZ Psi+ / W Phi+
program_name="AL2" # 2d pilot project W Psi+ / Wbar Phi+
Flag_char = "DS" # this for a mix of two types of separable states
if len(Flag_char) >= 2:
unique_char = "M"
else:
unique_char = Flag_char
# These dictionaries for the devices used in the study
if program_name == "QAD":
fidelity_dic = {'ibmq_athens': 0.925110, 'ibmq_valencia': 0.809101, 'ibmq_ourense': 0.802380,
"ibmqx2": 0.627392, 'ibmq_santiago': 0.919399, 'ibmq_vigo': 0.908840, 'ideal_device': 1.0}
data_directory = "data_files/"
elif program_name == "AL2":
fidelity_dic = {'ibmq_athens': 0.910145, 'ibmq_valencia': 0.794262, 'ibmq_ourense': 0.818974,
"ibmqx2": 0.359528, 'ibmq_santiago': 0.900024, 'ibmq_vigo': 0.841831, 'ideal_device': 1.0}
data_directory = "data2_files/"
QV_dic = {'ibmq_athens': 32.0, 'ibmq_valencia': 16.0, 'ibmq_ourense': 8.0,
"ibmqx2": 8.0, 'ibmq_santiago': 32.0, 'ibmq_vigo': 16.0, 'ideal_device': np.inf}
dev_dic = {'ibmq_santiago': "San",'ibmq_athens': "Ath", 'ibmq_valencia': "Val", 'ibmq_vigo': 'Vig','ibmq_ourense': "Our",
"ibmqx2": 'Yor', 'ideal_device': "Ide"}
# specify the device: here first the ideal noise-free device
project_device = 'ideal_device'
device_name = dev_dic[project_device]
# specify the nb of id gates between state creation and measurements
# zero for the ideal device
id_gates = 0
str_nb_id = str(id_gates)
zfilled = str_nb_id.zfill(4-len(str_nb_id))
# tail of the file names for RAM storage
mitig_name = program_name + "_" + device_name
project_name = mitig_name + "_" + unique_char + zfilled
print(mitig_name)
print(project_name)
# establish the result label list
# meas_calibs will be used for mitigation in the real device section
qr = QuantumRegister(used_qubits)
meas_calibs, label_list = complete_meas_cal(qubit_list=qubit_list, qr=qr, circlabel='mcal')
nb_labels=len(label_list)
print(nb_labels,label_list)
len(meas_calibs)
# permutation list
# here it is simple to write down the list,
    # but a version using itertools will be welcome for >5 qubits projects
if used_qubits == 5:
q_perm = [[0, 1, 2, 3, 4], [0, 1, 3, 2, 4], [0, 1, 4, 2, 3], [0, 2, 3, 1, 4], [0, 2, 4, 1, 3],
[0, 3, 4, 1, 2], [1, 2, 3, 0, 4], [1, 2, 4, 0, 3], [1, 3, 4, 0, 2], [2, 3, 4, 0, 1]]
else:
print("work in progress - meanwhile please provide the list of permutations")
###Output
_____no_output_____
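###Markdown
 As the comment above invites, here is a possible itertools-based sketch (added for illustration, not from the original study) that reproduces the same 10 permutations for 5 qubits, assuming the first three entries host the 3-qubit state and the last two the Bell pair:
###Code
    from itertools import combinations
    all_q = list(range(used_qubits))
    q_perm_gen = [list(trio) + [q for q in all_q if q not in trio]
                  for trio in combinations(all_q, 3)]
    print(q_perm_gen == q_perm)  # True for used_qubits == 5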
###Markdown
Create the quantum states
###Code
# define the two subsets of 10 separable states
if program_name == "QAD":
state_1a = ["W","Phi+"]
state_1b = ["GHZ","Psi+"]
    elif program_name in ("ALT", "AL2"):
state_1a = ["W","Psi+"]
state_1b = ["Wbar","Phi+"]
l_states = state_1a+state_1b
l_states
# version 20 circuits for demonstration
# (in the version run on real devices: two batches of 10 circuits, "shallow" and "deep")
# these circuits limited to state creation are ready to be saved
# for ultimately building circuits adapted to noisy simulator and real devices
# as option, these circuits will include a row of id gates between creation and measurements
circ_ori = []
for i_s in range(0,len(l_states),2):
for perm in q_perm:
mycircuit = QuantumCircuit(used_qubits, used_qubits)
mycircuit = new_cut.circuit_builder(mycircuit, perm, l_states[i_s],l_states[i_s+1])
circ_ori.append(mycircuit)
# add measurement section to the circuit set newly created:
nb_states = len(circ_ori)
circ_ideal = copy.deepcopy(circ_ori)
for i_state in range(nb_states):
new_cut.add_barrier_and_measure(circ_ideal[i_state],qubit_list)
ideal_dic = {}
###Output
_____no_output_____
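###Markdown
 The actual state preparation is delegated to the external `c_utils` module; purely as an added illustration (the real `new_cut.circuit_builder` may differ), a 3-qubit W state on the first three qubits of a permutation combined with a $\Psi^+$ Bell pair on the last two could be prepared like this:
###Code
    # illustrative sketch only: |W> on qubits 0,1,2 and |Psi+> on qubits 3,4
    sketch = QuantumCircuit(used_qubits, used_qubits)
    theta = 2*np.arccos(1/np.sqrt(3))
    sketch.ry(theta, 0)   # sqrt(1/3)|0> + sqrt(2/3)|1> on qubit 0
    sketch.ch(0, 1)
    sketch.cx(1, 2)
    sketch.cx(0, 1)
    sketch.x(0)           # qubits 0-2 now hold (|001>+|010>+|100>)/sqrt(3)
    sketch.h(3)
    sketch.cx(3, 4)
    sketch.x(4)           # qubits 3-4 hold (|01>+|10>)/sqrt(2)
    sketch.draw(output='mpl')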
###Markdown
Obtain result distributions on noise free simulator You may skip this section and go to: "Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier"
###Code
# execute on noise free simulator
s_sim = 12000
job_simul = execute(circ_ideal, backend=simulator, shots=s_sim)
tot_results_simul = job_simul.result()
# establish a dictionary of count results on noise free simulator:
# (this step is only useful if ram storage is performed)
void_counts = dict(zip(label_list, np.zeros(2**used_qubits)))
tot_results_sim_dic = {}
for i_state in range(nb_states):
counts_simul = copy.deepcopy(void_counts)
counts_simul.update(tot_results_simul.get_counts(i_state))
ideal_dic[str(i_state)]=counts_simul
###Output
_____no_output_____
###Markdown
 markdown for security: json_dic_dumper(ideal_dic,"ideal_dic_"+project_name)  Example of circuit for separable state of the first type for project 2 : $|W\rangle\otimes|\Psi^+\rangle$
###Code
i_state_test = 0
print(device_name, "circuit #",i_state_test)
circ_ideal[i_state_test].draw(output='mpl')
print(device_name, "circuit #",i_state_test)
plot_histogram(ideal_dic[str(i_state_test)],
legend=['noise free simulation'],
color = "b", figsize=(10.,5.))
###Output
Ide circuit # 0
###Markdown
Example of circuit for separable state of the second type for project 2 : $|W\rangle^{\otimes X}\otimes|\Phi^+\rangle$
###Code
i_state_test = 10
print(device_name, "circuit #",i_state_test)
circ_ideal[i_state_test].draw(output='mpl')
print(device_name, "circuit #",i_state_test)
plot_histogram(ideal_dic[str(i_state_test)],
legend=['noise free simulation'],
color = "b", figsize=(10.,5.))
###Output
Ide circuit # 10
###Markdown
Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier
###Code
# try loading the dictionary of results if its creation was skipped
if len(ideal_dic) == 0:
ideal_dic = json_dic_loader("ideal_dic_"+project_name)
nb_states = len(ideal_dic)
nb_labels = len(list(ideal_dic.values())[0])
s_sim = sum(list(ideal_dic.values())[0].values())
def print_first_and_last_row(PDM):
print("first and last rows of the probability distribution matrix of dimension "+str(nb_states)+"x"+str(nb_labels))
print(np.round(PDM[0:1,:],4))
print(" ...")
print(np.round(PDM[-1:,:],4))
PD_ideal = np.ndarray((nb_states,nb_labels))
for i_state in range(nb_states):
PD_ideal[i_state, :] = list(ideal_dic[str(i_state)].values())
    # now a little trick to get the ideal values from the simulator approximated values:
    # every nonzero ideal probability here is 1/k for an integer k, so rounding s_sim/count recovers k exactly
with np.errstate(divide='ignore'): # ignore the divide by zero warning
PD_ideal = 1/np.round(s_sim/(PD_ideal))
# have a look at the matrix head and tail:
print_first_and_last_row(PD_ideal)
###Output
first and last rows of the probability distribution matrix of dimension 20x32
[[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.1667
0.1667 0. 0.1667 0. 0. 0. 0. 0.1667 0.1667 0.
0.1667 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. ]]
...
[[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0.1667 0. 0. 0.1667 0. 0. 0. 0.
0.1667 0. 0. 0.1667 0.1667 0. 0. 0.1667 0. 0.
0. 0. ]]
###Markdown
Monte Carlo simulation for the ideal device
###Code
# here will be appended the data we want for the curve plot
ideal_data_list=[]
###Output
_____no_output_____
###Markdown
you may skip this cell and get stored curves by running the next cell
###Code
# you may want to skip this cell as it will require a long time
    # because of the high number of trials required by the Monte Carlo simulation for each nb of shots value
# the following values are defined in the study summary (readme file):
trials=100 # to be set to 10000 if not demo
window=5 # shorter window than for the real device counts
epsilon = .001
min_shots = 5
max_shots = 100
pol=2
subset = None # variable not used here
verbosality = 5 # printing step for intermediate results when increasing the experiment shot number
PD_test = PD_ideal
mitigation_dic = {"Na": None}
o_metrics_desired = ['jensenshannon', 'sqeuclidean']
model_dic = {"ideal_sim": PD_ideal}
for mit_str, mitigation in mitigation_dic.items():
if mitigation != None: # thus only for counts on real device
PD_exper = get_clean_matrix(empirical_dic, mitigation=mitigation,
m_filter=meas_filter)
for o_metric in o_metrics_desired:
for model_name in model_dic.keys():
add_single_dic(ideal_data_list)
###Output
_____no_output_____
###Markdown
 markdown for safety: json_dic_dumper(ideal_data_list,"ideal_device_data_list_"+project_name)
###Code
# get the stored results of the Monte Carlo simulation in case you skipped the previous step
if len(ideal_data_list) == 0:
ideal_data_list = json_dic_loader("ideal_device_data_list_"+project_name)
    # have a look at the mean error rate curves and error rate at the safe shot number n_s
    # NB the r_hat_mean curves and the legend-reported r_hat_max errors are the unsmoothed values
opl.plot_curves(ideal_data_list,np.array([0,1]),
"Jensen-Shannon vs squared euclidean distance - $\epsilon=0.001$" ,
["model"], ["device","metric"],
right_xlimit = 20, bottom_ylimit = -0.001, top_ylimit = 0.05)
###Output
_____no_output_____
###Markdown
Real device section
###Code
from qiskit import IBMQ
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
    project_device = 'ibmq_valencia'  # you may choose a different backend here
device_name = dev_dic[project_device]
mitig_name = program_name + "_" + device_name
print(mitig_name)
#determine here the backend
device = provider.get_backend(project_device) # the backend names are listed here above
properties = device.properties()
coupling_map = device.configuration().coupling_map
###Output
_____no_output_____
###Markdown
obtain mitigation filter markdown for demonb_shots_cal = 8192 set here the number of shots for the calibration phaseprint("backend:", device.name(), "qubit_list:", qubit_list)job_cal = execute(meas_calibs, backend=device, shots=nb_shots_cal)print(job_cal.job_id())job_monitor(job_cal)time_exp = time.strftime('%d/%m/%Y %H:%M:%S')print("DMY: ",time_exp) markdown for demohere we save mitigation resultscal_results = job_cal.result()cal_results_dic = cal_results.to_dict()to make date in dictionary serializable if there is a 'date' key:if 'date' in cal_results_dic.keys(): cal_results_dic['date']=str(cal_results_dic['date']) markdown for demo and securitydumpjson_dic_dumper(cal_results_dic,"cal_results_dic_"+ mitig_name)
###Code
# retrieve the corresponding measurement mitigation filter obtained at experimental time
# use a fake job because use of the from_dict method
simulator = Aer.get_backend('qasm_simulator')
fake_job_cal = execute(meas_calibs, backend=simulator, shots=1)
fake_cal_results = fake_job_cal.result()
cal_results_dic = json_dic_loader("cal_results_dic_"+mitig_name)
if 'date' in cal_results_dic.keys():
str(cal_results_dic['date'])
cal_results = fake_cal_results.from_dict(cal_results_dic)
meas_fitter = CompleteMeasFitter(cal_results, label_list, qubit_list=qubit_list, circlabel='mcal')
meas_filter = meas_fitter.filter
    # have a look at the average measurement fidelity of this device:
print("Average Measurement Fidelity was: %f" % meas_fitter.readout_fidelity(), "for",project_device)
###Output
Average Measurement Fidelity was: 0.794262 for ibmq_valencia
###Markdown
 Transpile the basic circuits for running on the real device. In this demo, these are not the circuits which were actually run on the real devices (not the same transpiler seed). The optimization level is set to 2 instead of the 3 used in the real experiments, for speed and also because at this moment a transpiler error occurs for ibmqx2: 'Maximum iteration reached. max_iteration=1000'
###Code
id_gates = 0
str_nb_id = str(id_gates)
zfilled = str_nb_id.zfill(4-len(str_nb_id))
project_name = mitig_name + "_" + unique_char + zfilled
print(project_name)
# transpile
verbose = True
summary_dic = {}
seed_transpiler_list = list(range(nb_states))
real_circs = []
start_time = time.strftime('%d/%m/%Y %H:%M:%S')
print("Start at DMY: ",start_time)
for i_state in list(range(nb_states)):
# prepare circuit to be transpiled
circuit = copy.deepcopy(circ_ori[i_state])
if id_gates > 0:
circuit.barrier()
for id_gates_index in range(id_gates):
for index, value in enumerate(qubit_list):
circuit.id(value)
new_cut.add_barrier_and_measure(circuit, qubit_list)
summary = []
depth_list = []
Q_state_opt_new = transpile(circuit, backend=device,
coupling_map = coupling_map,
seed_transpiler=seed_transpiler_list[i_state],
optimization_level=2,
initial_layout=qubit_list)
summary_dic[i_state] = {"depth": Q_state_opt_new.depth(),
'circuit':Q_state_opt_new}
real_circs.append(Q_state_opt_new)
if verbose:
print("circuit %2i" % i_state,"length",summary_dic[i_state]["depth"],
"DMY: ",time.strftime('%d/%m/%Y %H:%M:%S'))
end_time = time.strftime('%d/%m/%Y %H:%M:%S')
print("Completed at DMY: ",end_time)
i_state_test = 10
print(project_device, "circuit #",i_state_test,
"circuit length:",real_circs[i_state_test].depth()) #summary_dic[i_state_test]['depth'])
# you may want to skip this if large nb of id gates before measurement
real_circs[i_state_test].draw(output='mpl')
#check a circuit on noise-free simulator
job_simul = execute(real_circs[i_state_test], backend=simulator, shots=s_sim)
print(project_device, "circuit #",i_state_test, "on noise free simulator")
plot_histogram(job_simul.result().get_counts(),
legend=['noise free simulation'],
color = "b", figsize=(10.,5.))
###Output
ibmq_valencia circuit # 10 on noise free simulator
###Markdown
run job markdown for demorun the circuitsnb_shots = 8192print("backend:", device.name(), "qubit_list:", qubit_list)time_exp = time.strftime('%d_%m_%Y_%H_%M_%S')print("DMY: ",time_exp) job_real = execute(real_circs, backend=device, optimization_level=0, shots=nb_shots)job_real_id = job_real.job_id()print("job id:", job_real_id)job_monitor(job_real)time_exp = time.strftime('%d_%m_%Y_%H_%M_%S')print("DMY: ",time_exp, "job id:", job_real_id)tot_results_real = job_real.result()empirical_dic ={}for i_state_count, state_count in enumerate(tot_results_real.get_counts()): empirical_dic[str(i_state_count)] = state_count markdown for safetyjson_dic_dumper(job_real_id,"job_real_id_"+ project_name) markdown for safety at demojson_dic_dumper(empirical_dic,"experimental_"+ project_name) markdown for demo2d JOB RUNnb_shots = 8192run the circuitsprint("backend:", device.name(), "qubit_list:", qubit_list)time_exp = time.strftime('%d_%m_%Y_%H_%M_%S')print("DMY: ",time_exp) job_test = execute(real_circs, backend=device, optimization_level=0, shots=nb_shots)job_test_id = job_test.job_id()print("job id:", job_test_id)job_monitor(job_test)time_exp = time.strftime('%d_%m_%Y_%H_%M_%S')print("DMY: ",time_exp, "job id:", job_test_id)tot_results_test = job_test.result()test_dic ={}for i_state_count, state_count in enumerate(tot_results_test.get_counts()): test_dic[str(i_state_count)] = state_count markdown for safety at demojson_dic_dumper(job_test_id,"job_test_id_"+ project_name)json_dic_dumper(test_dic,"test_"+ project_name) Load the transpiled circuits that were actually run legacy: valid only for the GHZ Psi+ / W Phi- combinationotherwise go instead to: "Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier"
###Code
#changing keys of dictionary for merging:
def key_change(ini_dict, i_subset):
ini_list = []
len_ini = len(ini_dict)
for i in range(len_ini):
ini_list.append(str(i+i_subset*len_ini))
return dict(zip(ini_list, list(ini_dict.values())))
if program_name == "QAD":
#retrieve the data corresponding to the 1st project
lfc = list(Flag_char)
circ_ideal =[]
empirical_dic = {}
for i_subset, subset in enumerate(lfc):
qasm_circs_dic = json_dic_loader('qasm_circs_dic_QAD_'+device_name+'_'+ subset + zfilled)
j=0 # j included for project with several transpilation sessions for each device - not used here
qasm_circs = qasm_circs_dic[str(j)]
nb_circs = len(qasm_circs)
for i_circs in range(nb_circs):
circ_ideal.append(QuantumCircuit().from_qasm_str(qasm_circs[i_circs]))
empirical_dic = {**empirical_dic,
**key_change(json_dic_loader("experimental"+"_"+mitig_name +"_"\
+subset+zfilled), i_subset)}
test_dic = copy.deepcopy(empirical_dic)
#nb_states = len(circ_ideal)
###Output
_____no_output_____
###Markdown
Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier
###Code
if program_name == "AL2":
empirical_dic = json_dic_loader('experimental_'+project_name)
test_dic = json_dic_loader('test_'+project_name)
def rectify_counts(tot_res, test_cqi,mitigation,m_filter) :
void_counts = dict(zip(label_list, np.zeros(2**used_qubits)))
try:
counts_results_real_test = tot_res[str(test_cqi)]
except KeyError as error:
counts_results_real_test = tot_res[test_cqi]
raw_counts_test = copy.deepcopy(void_counts)
raw_counts_test.update(counts_results_real_test)
if mitigation:
mitigated_results_test = meas_filter.apply(raw_counts_test, method = 'least_squares')
returned_counts = copy.deepcopy(void_counts)
returned_counts.update(mitigated_results_test)
else:
returned_counts = copy.deepcopy(raw_counts_test)
return returned_counts
###Output
_____no_output_____
###Markdown
Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier
###Code
def get_clean_matrix(dic, mitigation,m_filter):
clean_matrix = np.ndarray((nb_states,nb_labels))
for i_state in range(nb_states):
rectified_counts = rectify_counts(dic,i_state, mitigation,m_filter) # get a rectified counts dictionary
clean_matrix[i_state, :] = list(rectified_counts.values())
clean_matrix = clean_matrix/clean_matrix.sum(axis=1, keepdims=True)
return clean_matrix
# We need to create a first matrix version. It will then vary for each considered set of distribution
mitigation = False
PD_exper = get_clean_matrix(empirical_dic, mitigation=mitigation,
m_filter=meas_filter)
print_first_and_last_row(PD_exper)
if program_name == "QAD":
PD_test = copy.deepcopy(PD_exper)
elif program_name == "AL2":
mitigation = False
PD_test = get_clean_matrix(test_dic, mitigation=mitigation,
m_filter=meas_filter)
print_first_and_last_row(PD_test)
###Output
first and last rows of the probability distribution matrix of dimension 20x32
[[0.0046 0.0127 0.0114 0.0004 0.0165 0.0005 0.0004 0.0005 0.0319 0.0939
0.078 0.0017 0.1027 0.0043 0.0055 0.0056 0.0479 0.1379 0.1162 0.0044
0.1602 0.006 0.0068 0.007 0.0161 0.0391 0.032 0.0012 0.0476 0.0023
0.0018 0.0032]]
...
[[0.0112 0.0011 0.0031 0.0092 0.014 0.0031 0.0042 0.0157 0.0332 0.0076
0.0089 0.0248 0.141 0.0243 0.0243 0.1394 0.0138 0.0029 0.0023 0.0104
0.0656 0.0112 0.0123 0.0546 0.127 0.0189 0.0214 0.0896 0.0479 0.0081
0.0103 0.0389]]
###Markdown
Monte Carlo simulation for the real device
###Code
# here will be appended the data we want for the final plot of this notebook
empirical_data_list=[]
###Output
_____no_output_____
###Markdown
you may want to skip this cell and get stored curves by running the next cell
###Code
# you may want to skip this cell as it will require a long time
# because of the high number of trials required by the Monte Carlo simulation for each nb o shots value
# the following values are defined in the study summary notebook:
trials=100 # should be 1000 if not demo
window=11
epsilon = .001
max_shots = 500
pol=2
verbosality = 10 # printing step for intermediate results when increasing the experiment shot number
# In this section you can easily make your choice of combinations:
# mitigation or not, metric, model
mitigation_dic = {"no":False, "yes" : True}
#mitigation_dic = {"no":False}
#mitigation_dic = {"yes" : True}
o_metrics_desired = ['jensenshannon', 'sqeuclidean']
#o_metrics_desired = ['jensenshannon']
#o_metrics_desired = ['sqeuclidean']
model_dic = {"empirical": PD_exper, "ideal_sim": PD_ideal}
#model_dic = {"empirical": PD_exper}
#model_dic = {"ideal_sim": PD_ideal}
# Obtain a sequence of results in form of a list of dictionaries
for mit_str, mitigation in mitigation_dic.items():
# here we toggle PD_exper as we toggled mitigation status
PD_exper = get_clean_matrix(empirical_dic, mitigation=mitigation,
m_filter=meas_filter)
PD_test = get_clean_matrix(test_dic, mitigation=mitigation,
m_filter=meas_filter)
for o_metric in o_metrics_desired:
print(project_name, model_dic.keys(), o_metric)
for model_name in model_dic.keys():
add_single_dic(empirical_data_list)
###Output
_____no_output_____
###Markdown
 markdown for security: json_dic_dumper(empirical_data_list,'Tnemp_data_list_'+project_name)
###Code
# get the stored results of the Monte Carlo simulation in case you skipped the previous step
if len(empirical_data_list) == 0:
empirical_data_list = json_dic_loader('Nemp_data_list_'+project_name)
    # have a look at the mean error rate curves and error rate at the safe shot number n_s
# NB the r_hat_mean curves and legend reported r_hat_max errors are the unsmoothed values
opl.plot_curves(ideal_data_list + empirical_data_list,
np.array(range(2+len(empirical_data_list))),
"$\epsilon=0.001$" , ["device"],
["model","metric","mitigation","id_gates"],
right_xlimit = 80, bottom_ylimit = -0.02, top_ylimit = 1)
import winsound
duration = 2000 # milliseconds
freq = 800 # Hz
winsound.Beep(freq, duration)
import qiskit.tools.jupyter
%qiskit_version_table
###Output
_____no_output_____
###Markdown
 Classification of quantum states with high dimensional entanglement - Circuits and computations. Version compatible with the 1st and 2nd pilot studies.
###Code
import numpy as np
import copy
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister, Aer, execute, transpile, assemble
from qiskit.tools.visualization import *
from qiskit.ignis.mitigation.measurement import (complete_meas_cal, tensored_meas_cal,
CompleteMeasFitter, TensoredMeasFitter)
import json
from scipy.signal import savgol_filter
import time
from qiskit.tools.monitor import job_monitor
from o_utils import ora # classifier utilities
from o_plot import opl # utilities for result plot
from c_utils import new_cut # circuit building utilities
def json_dic_loader(dic_name):
f = open(data_directory+dic_name+'.json')
return json.load(f)
###Output
_____no_output_____
###Markdown
 markdown for safety on demo: def json_dic_dumper(dic, dic_name): with open(data_directory+dic_name+'.json', 'w') as f: json.dump(dic,f)
###Code
# common code for calling the classifier for ideal device and for real devices
def add_single_dic(target_data_list):
start_time = time.time()
print("started",time.strftime('%d/%m/%Y %H:%M:%S'),mitig_name,
"mitigation",mit_str,o_metric,model_name)
# added for D,S,M choice. Mainstream : mixed set of 20 states
first = 0
last = nb_states
if unique_char == "D":
last = int(nb_states/2)
elif unique_char == "S":
first = int(nb_states/2)
# get the classifier error curve in function of the number of shot and the "safe shot number"
error_curve, safe_rate, ernb = ora.provide_error_curve(PD_model=model_dic[model_name][first:last,:],
PD_test=PD_test[first:last,:],
trials=trials,
window=window,
epsilon=epsilon,
max_shots=max_shots,
pol=pol,
verbosality=verbosality)
tail = savgol_filter(ernb, window, pol, axis=0)
len_curve = len(error_curve)
safe_shot_nb = len_curve - int((window-1)/2) # OK
print('safe_shot_nb',safe_shot_nb, 'safe_rate',safe_rate, "nb trials:",trials)
err_rates = tail[int((window-1)/2),:]/trials
err_rate_max = np.max(err_rates)
err_rate_min = np.min(err_rates)
r=4
print("savgol interpolated error rate mean:", np.round(np.mean(err_rates),r),
"min:", np.round(err_rate_min,r),
"max:", np.round(err_rate_max,r), "for",
[ien for ien, jen in enumerate(err_rates) if jen == err_rate_max])
end_time = time.time()
#save the data in a list of dictionaries :
single_dic={"project":mitig_name,
"id_gates":id_gates,
"mitigation":mit_str,
"model":model_name,
"metric":o_metric, "device":project_device,
"curve_length":len_curve,
"shots": safe_shot_nb,
"shots_rate": safe_rate,
"error_curve":error_curve,
"trials":trials,"window":window,
"epsilon":epsilon,"SG_pol": pol,
"computation_time":end_time-start_time,
"time_completed":time.strftime('%d/%m/%Y %H:%M:%S'),
"trials":trials,
"QV": QV_dic[project_device],
"fidelity": fidelity_dic[project_device],
"error_nb":ernb}
target_data_list.append(single_dic)
print("completed",time.strftime('%d/%m/%Y %H:%M:%S'),mitig_name,
"mitigation",mit_str,o_metric,model_name,"\n")
###Output
_____no_output_____
###Markdown
Set up the simulator and layout for 5 qubits
###Code
simulator = Aer.get_backend('qasm_simulator')
#specify the layout of the devices
used_qubits = 5
qubit_list = [0,1,2,3,4]
#short_version = False
#program_name="QAD" # 1st pilot project GHZ Psi+ / W Phi+
program_name="AL2" # 2d pilot project W Psi+ / Wbar Phi+
Flag_char = "DS" # this for a mix of two types of separable states
if len(Flag_char) >= 2:
unique_char = "M"
else:
unique_char = Flag_char
# These dictionaries for the devices used in the study
if program_name == "QAD":
fidelity_dic = {'ibmq_athens': 0.925110, 'ibmq_valencia': 0.809101, 'ibmq_ourense': 0.802380,
"ibmqx2": 0.627392, 'ibmq_santiago': 0.919399, 'ibmq_vigo': 0.908840, 'ideal_device': 1.0}
data_directory = "data_files/"
elif program_name == "AL2":
fidelity_dic = {'ibmq_athens': 0.910145, 'ibmq_valencia': 0.794262, 'ibmq_ourense': 0.818974,
"ibmqx2": 0.359528, 'ibmq_santiago': 0.900024, 'ibmq_vigo': 0.841831, 'ideal_device': 1.0}
data_directory = "data2_files/"
QV_dic = {'ibmq_athens': 32.0, 'ibmq_valencia': 16.0, 'ibmq_ourense': 8.0,
"ibmqx2": 8.0, 'ibmq_santiago': 32.0, 'ibmq_vigo': 16.0, 'ideal_device': np.inf}
dev_dic = {'ibmq_santiago': "San",'ibmq_athens': "Ath", 'ibmq_valencia': "Val", 'ibmq_vigo': 'Vig','ibmq_ourense': "Our",
"ibmqx2": 'Yor', 'ideal_device': "Ide"}
# specify the device: here first the ideal noise-free device
project_device = 'ideal_device'
device_name = dev_dic[project_device]
# specify the nb of id gates between state creation and measurements
# zero for the ideal device
id_gates = 0
str_nb_id = str(id_gates)
zfilled = str_nb_id.zfill(4-len(str_nb_id))
# tail of the file names for RAM storage
mitig_name = program_name + "_" + device_name
project_name = mitig_name + "_" + unique_char + zfilled
print(mitig_name)
print(project_name)
# establish the result label list
# meas_calibs will be used for mitigation in the real device section
qr = QuantumRegister(used_qubits)
meas_calibs, label_list = complete_meas_cal(qubit_list=qubit_list, qr=qr, circlabel='mcal')
nb_labels=len(label_list)
print(nb_labels,label_list)
len(meas_calibs)
# permutation list
# here it is simple to write down the list,
    # but a version using itertools will be welcome for >5 qubits projects
if used_qubits == 5:
q_perm = [[0, 1, 2, 3, 4], [0, 1, 3, 2, 4], [0, 1, 4, 2, 3], [0, 2, 3, 1, 4], [0, 2, 4, 1, 3],
[0, 3, 4, 1, 2], [1, 2, 3, 0, 4], [1, 2, 4, 0, 3], [1, 3, 4, 0, 2], [2, 3, 4, 0, 1]]
else:
print("work in progress - meanwhile please provide the list of permutations")
###Output
_____no_output_____
###Markdown
Create the quantum states
###Code
# define the two subsets of 10 separable states
if program_name == "QAD":
state_1a = ["W","Phi+"]
state_1b = ["GHZ","Psi+"]
elif program_name == "ALT" or "AL2":
state_1a = ["W","Psi+"]
state_1b = ["Wbar","Phi+"]
l_states = state_1a+state_1b
l_states
# version 20 circuits for demonstration
# (in the version run on real devices: two batches of 10 circuits, "shallow" and "deep")
# these circuits limited to state creation are ready to be saved
# for ultimately building circuits adapted to noisy simulator and real devices
# as option, these circuits will include a row of id gates between creation and measurements
circ_ori = []
for i_s in range(0,len(l_states),2):
for perm in q_perm:
mycircuit = QuantumCircuit(used_qubits, used_qubits)
mycircuit = new_cut.circuit_builder(mycircuit, perm, l_states[i_s],l_states[i_s+1])
circ_ori.append(mycircuit)
# add measurement section to the circuit set newly created:
nb_states = len(circ_ori)
circ_ideal = copy.deepcopy(circ_ori)
for i_state in range(nb_states):
new_cut.add_barrier_and_measure(circ_ideal[i_state],qubit_list)
ideal_dic = {}
###Output
_____no_output_____
###Markdown
Obtain result distributions on noise free simulator You may skip this section and go to: "Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier"
###Code
# execute on noise free simulator
s_sim = 12000
job_simul = execute(circ_ideal, backend=simulator, shots=s_sim)
tot_results_simul = job_simul.result()
# establish a dictionary of count results on noise free simulator:
# (this step is only useful if ram storage is performed)
void_counts = dict(zip(label_list, np.zeros(2**used_qubits)))
tot_results_sim_dic = {}
for i_state in range(nb_states):
counts_simul = copy.deepcopy(void_counts)
counts_simul.update(tot_results_simul.get_counts(i_state))
ideal_dic[str(i_state)]=counts_simul
###Output
_____no_output_____
###Markdown
markdown for security: json_dic_dumper(ideal_dic,"ideal_dic_"+project_name) Example of circuit for separable state of the first type ($W\otimes\Phi^+\; or\; W\otimes\Psi^+$):
###Code
i_state_test = 10
print(device_name, "circuit #",i_state_test)
circ_ideal[i_state_test].draw(output='mpl')
print(device_name, "circuit #",i_state_test)
plot_histogram(ideal_dic[str(i_state_test)],
legend=['noise free simulation'],
color = "b", figsize=(10.,5.))
###Output
Ide circuit # 10
###Markdown
Example of circuit for separable state of the second type ($GHZ\otimes\Psi^+ \; or\; \bar{W}\otimes\Phi^+$):
###Code
i_state_test = 10
print(device_name, "circuit #",i_state_test)
circ_ideal[i_state_test].draw(output='mpl')
print(device_name, "circuit #",i_state_test)
plot_histogram(ideal_dic[str(i_state_test)],
legend=['noise free simulation'],
color = "b", figsize=(10.,5.))
###Output
Ide circuit # 10
###Markdown
Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier
###Code
# try loading the dictionary of results if its creation was skipped
if len(ideal_dic) == 0:
ideal_dic = json_dic_loader("ideal_dic_"+project_name)
nb_states = len(ideal_dic)
nb_labels = len(list(ideal_dic.values())[0])
s_sim = sum(list(ideal_dic.values())[0].values())
PD_ideal = np.ndarray((nb_states,nb_labels))
for i_state in range(nb_states):
PD_ideal[i_state, :] = list(ideal_dic[str(i_state)].values())
# now a little trick to get the ideal values from the simulator approximated values
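# (for these states the ideal probabilities are simple fractions such as 1/6 or 1/4,
#  so rounding s_sim/count recovers the exact value; zero counts give inf and then
#  1/inf = 0, which is why the divide-by-zero warning is silenced below)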
with np.errstate(divide='ignore'): # ignore the divide by zero warning
PD_ideal = 1/np.round(s_sim/(PD_ideal))
# have a look at the matrix head and tail:
print("first and last state probability distributions:")
print(np.round(np.vstack((PD_ideal[0:1,:],PD_ideal[-1:,:])),4))
###Output
first and last state probability distributions:
[[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.1667
0.1667 0. 0.1667 0. 0. 0. 0. 0.1667 0.1667 0.
0.1667 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. ]
[0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0.1667 0. 0. 0.1667 0. 0. 0. 0.
0.1667 0. 0. 0.1667 0.1667 0. 0. 0.1667 0. 0.
0. 0. ]]
###Markdown
Monte Carlo simulation for the ideal device
###Code
# here will be appended the data we want for the curve plot
ideal_data_list=[]
###Output
_____no_output_____
###Markdown
you may skip this cell and get stored curves by running the next cell
###Code
# you may want to skip this cell as it will require a long time
# because of the high number of trials required by the Monte Carlo simulation for each nb of shots value
# the following values are defined in the study summary (readme file):
trials=100 # to be set to 10000 if not demo
window=5 # shorter window than for the real device counts
epsilon = .001
min_shots = 5
max_shots = 100
pol=2
subset = None # variable not used here
verbosality = 5 # printing step for intermediate results when increasing the experiment shot number
PD_test = PD_ideal
mitigation_dic = {"Na": None}
o_metrics_desired = ['jensenshannon', 'sqeuclidean']
model_dic = {"ideal_sim": PD_ideal}
for mit_str, mitigation in mitigation_dic.items():
if mitigation != None: # thus only for counts on real device
PD_exper = get_clean_matrix(empirical_dic, mitigation=mitigation,
m_filter=meas_filter)
for o_metric in o_metrics_desired:
for model_name in model_dic.keys():
add_single_dic(ideal_data_list)
###Output
_____no_output_____
###Markdown
markdown for safety: json_dic_dumper(ideal_data_list,"ideal_device_data_list_"+project_name)
###Code
# get the stored results of the Monte Carlo simulation in case you skipped the previous step
if len(ideal_data_list) == 0:
ideal_data_list = json_dic_loader("ideal_device_data_list_"+project_name)
# have a look at the mean error rate curves and error rate at save shot number n_s
# NB the r_hat_mean curves and legend-reported r_hat_max errors are the unsmoothed values
opl.plot_curves(ideal_data_list,np.array([0,1]),
"Jensen-Shannon vs squared euclidean distance - $\epsilon=0.001$" ,
["model"], ["device","metric"],
right_xlimit = 20, bottom_ylimit = -0.001, top_ylimit = 0.05)
###Output
_____no_output_____
###Markdown
Real device section
###Code
from qiskit import IBMQ
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
project_device = 'ibmq_valencia'  # you may choose a different backend here
device_name = dev_dic[project_device]
mitig_name = program_name + "_" + device_name
print(mitig_name)
#determine here the backend
device = provider.get_backend(project_device) # the backend names are listed here above
properties = device.properties()
coupling_map = device.configuration().coupling_map
###Output
_____no_output_____
###Markdown
obtain mitigation filter markdown for demonb_shots_cal = 8192 set here the number of shots for the calibration phaseprint("backend:", device.name(), "qubit_list:", qubit_list)job_cal = execute(meas_calibs, backend=device, shots=nb_shots_cal)print(job_cal.job_id())job_monitor(job_cal)time_exp = time.strftime('%d/%m/%Y %H:%M:%S')print("DMY: ",time_exp) markdown for demohere we save mitigation resultscal_results = job_cal.result()cal_results_dic = cal_results.to_dict()to make date in dictionary serializable if there is a 'date' key:if 'date' in cal_results_dic.keys(): cal_results_dic['date']=str(cal_results_dic['date']) markdown for demo and securitydumpjson_dic_dumper(cal_results_dic,"cal_results_dic_"+ mitig_name)
###Code
# retrieve the corresponding measurement mitigation filter obtained at experimental time
# use a fake job because use of the from_dict method
simulator = Aer.get_backend('qasm_simulator')
fake_job_cal = execute(meas_calibs, backend=simulator, shots=1)
fake_cal_results = fake_job_cal.result()
cal_results_dic = json_dic_loader("cal_results_dic_"+mitig_name)
if 'date' in cal_results_dic.keys():
str(cal_results_dic['date'])
cal_results = fake_cal_results.from_dict(cal_results_dic)
meas_fitter = CompleteMeasFitter(cal_results, label_list, qubit_list=qubit_list, circlabel='mcal')
meas_filter = meas_fitter.filter
# have a look at the average measurement fidelity of this device:
print("Average Measurement Fidelity was: %f" % meas_fitter.readout_fidelity(), "for",project_device)
###Output
Average Measurement Fidelity was: 0.794262 for ibmq_valencia
###Markdown
Transpile the basic circuits for running on a real device. In this demo, these are not the circuits which were actually run on the real devices (not the same transpiler seed). The optimization level is set to 2 here instead of the 3 used in the real experiments, for speed and also because at this moment a transpiler error occurs for ibmqx2: 'Maximum iteration reached. max_iteration=1000'
###Code
id_gates = 0
str_nb_id = str(id_gates)
zfilled = str_nb_id.zfill(4-len(str_nb_id))
project_name = mitig_name + "_" + unique_char + zfilled
print(project_name)
# transpile
verbose = True
summary_dic = {}
seed_transpiler_list = list(range(nb_states))
real_circs = []
start_time = time.strftime('%d/%m/%Y %H:%M:%S')
print("Start at DMY: ",start_time)
for i_state in list(range(nb_states)):
# prepare circuit to be transpiled
circuit = copy.deepcopy(circ_ori[i_state])
if id_gates > 0:
circuit.barrier()
for id_gates_index in range(id_gates):
for index, value in enumerate(qubit_list):
circuit.id(value)
new_cut.add_barrier_and_measure(circuit, qubit_list)
summary = []
depth_list = []
Q_state_opt_new = transpile(circuit, backend=device,
coupling_map = coupling_map,
seed_transpiler=seed_transpiler_list[i_state],
optimization_level=2,
initial_layout=qubit_list)
summary_dic[i_state] = {"depth": Q_state_opt_new.depth(),
'circuit':Q_state_opt_new}
real_circs.append(Q_state_opt_new)
if verbose:
print("circuit %2i" % i_state,"length",summary_dic[i_state]["depth"],
"DMY: ",time.strftime('%d/%m/%Y %H:%M:%S'))
end_time = time.strftime('%d/%m/%Y %H:%M:%S')
print("Completed at DMY: ",end_time)
i_state_test = 10
print(project_device, "circuit #",i_state_test,
"circuit length:",real_circs[i_state_test].depth()) #summary_dic[i_state_test]['depth'])
# you may want to skip this if large nb of id gates before measurement
real_circs[i_state_test].draw(output='mpl')
#check a circuit on noise-free simulator
job_simul = execute(real_circs[i_state_test], backend=simulator, shots=s_sim)
print(project_device, "circuit #",i_state_test, "on noise free simulator")
plot_histogram(job_simul.result().get_counts(),
legend=['noise free simulation'],
color = "b", figsize=(10.,5.))
###Output
ibmq_valencia circuit # 10 on noise free simulator
###Markdown
run job markdown for demorun the circuitsnb_shots = 8192print("backend:", device.name(), "qubit_list:", qubit_list)time_exp = time.strftime('%d_%m_%Y_%H_%M_%S')print("DMY: ",time_exp) job_real = execute(real_circs, backend=device, optimization_level=0, shots=nb_shots)job_real_id = job_real.job_id()print("job id:", job_real_id)job_monitor(job_real)time_exp = time.strftime('%d_%m_%Y_%H_%M_%S')print("DMY: ",time_exp, "job id:", job_real_id)tot_results_real = job_real.result()empirical_dic ={}for i_state_count, state_count in enumerate(tot_results_real.get_counts()): empirical_dic[str(i_state_count)] = state_count markdown for safetyjson_dic_dumper(job_real_id,"job_real_id_"+ project_name) markdown for safety at demojson_dic_dumper(empirical_dic,"experimental_"+ project_name) markdown for demo2d JOB RUNnb_shots = 8192run the circuitsprint("backend:", device.name(), "qubit_list:", qubit_list)time_exp = time.strftime('%d_%m_%Y_%H_%M_%S')print("DMY: ",time_exp) job_test = execute(real_circs, backend=device, optimization_level=0, shots=nb_shots)job_test_id = job_test.job_id()print("job id:", job_test_id)job_monitor(job_test)time_exp = time.strftime('%d_%m_%Y_%H_%M_%S')print("DMY: ",time_exp, "job id:", job_test_id)tot_results_test = job_test.result()test_dic ={}for i_state_count, state_count in enumerate(tot_results_test.get_counts()): test_dic[str(i_state_count)] = state_count markdown for safety at demojson_dic_dumper(job_test_id,"job_test_id_"+ project_name)json_dic_dumper(test_dic,"test_"+ project_name) Load the transpiled circuits that were actually run legacy: valid only for the GHZ Psi+ / W Phi- combinationotherwise go instead to: "Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier"
###Code
#changing keys of dictionary for merging:
def key_change(ini_dict, i_subset):
ini_list = []
len_ini = len(ini_dict)
for i in range(len_ini):
ini_list.append(str(i+i_subset*len_ini))
return dict(zip(ini_list, list(ini_dict.values())))
if program_name == "QAD":
#retrieve the data corresponding to the 1st project
lfc = list(Flag_char)
circ_ideal =[]
empirical_dic = {}
for i_subset, subset in enumerate(lfc):
qasm_circs_dic = json_dic_loader('qasm_circs_dic_QAD_'+device_name+'_'+ subset + zfilled)
j=0 # j included for project with several transpilation sessions for each device - not used here
qasm_circs = qasm_circs_dic[str(j)]
nb_circs = len(qasm_circs)
for i_circs in range(nb_circs):
circ_ideal.append(QuantumCircuit().from_qasm_str(qasm_circs[i_circs]))
empirical_dic = {**empirical_dic,
**key_change(json_dic_loader("experimental"+"_"+mitig_name +"_"\
+subset+zfilled), i_subset)}
test_dic = copy.deepcopy(empirical_dic)
#nb_states = len(circ_ideal)
###Output
_____no_output_____
###Markdown
Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier
###Code
if program_name == "AL2":
empirical_dic = json_dic_loader('experimental_'+project_name)
test_dic = json_dic_loader('test_'+project_name)
def rectify_counts(tot_res, test_cqi,mitigation,m_filter) :
void_counts = dict(zip(label_list, np.zeros(2**used_qubits)))
try:
counts_results_real_test = tot_res[str(test_cqi)]
except KeyError as error:
counts_results_real_test = tot_res[test_cqi]
raw_counts_test = copy.deepcopy(void_counts)
raw_counts_test.update(counts_results_real_test)
if mitigation:
mitigated_results_test = meas_filter.apply(raw_counts_test, method = 'least_squares')
returned_counts = copy.deepcopy(void_counts)
returned_counts.update(mitigated_results_test)
else:
returned_counts = copy.deepcopy(raw_counts_test)
return returned_counts
###Output
_____no_output_____
###Markdown
Obtain the matrix of probability distribution of shape(nb_state,nb_labels) used by the classifier
###Code
def get_clean_matrix(dic, mitigation,m_filter):
clean_matrix = np.ndarray((nb_states,nb_labels))
for i_state in range(nb_states):
rectified_counts = rectify_counts(dic,i_state, mitigation,m_filter) # get a rectified counts dictionary
clean_matrix[i_state, :] = list(rectified_counts.values())
clean_matrix = clean_matrix/clean_matrix.sum(axis=1, keepdims=True)
return clean_matrix
# We need to create a first matrix version. It will then vary for each considered set of distribution
mitigation = False
PD_exper = get_clean_matrix(empirical_dic, mitigation=mitigation,
m_filter=meas_filter)
print("first and last state probability distributions:")
print(np.round(np.vstack((PD_exper[0:1,:],PD_exper[-1:,:])),3))
if program_name == "QAD":
PD_test = copy.deepcopy(PD_exper)
elif program_name == "AL2":
mitigation = False
PD_test = get_clean_matrix(test_dic, mitigation=mitigation,
m_filter=meas_filter)
print("first and last state probability distributions:")
print(np.round(np.vstack((PD_test[0:1,:],PD_test[-1:,:])),3))
###Output
first and last state probability distributions:
[[0.005 0.013 0.011 0. 0.016 0. 0. 0. 0.032 0.094 0.078 0.002
0.103 0.004 0.005 0.006 0.048 0.138 0.116 0.004 0.16 0.006 0.007 0.007
0.016 0.039 0.032 0.001 0.048 0.002 0.002 0.003]
[0.011 0.001 0.003 0.009 0.014 0.003 0.004 0.016 0.033 0.008 0.009 0.025
0.141 0.024 0.024 0.139 0.014 0.003 0.002 0.01 0.066 0.011 0.012 0.055
0.127 0.019 0.021 0.09 0.048 0.008 0.01 0.039]]
###Markdown
Monte Carlo simulation for the real device
###Code
# here will be appended the data we want for the final plot of this notebook
empirical_data_list=[]
###Output
_____no_output_____
###Markdown
you may want to skip this cell and get stored curves by running the next cell
###Code
# you may want to skip this cell as it will require a long time
# because of the high number of trials required by the Monte Carlo simulation for each nb of shots value
# the following values are defined in the study summary notebook:
trials=100 # should be 1000 if not demo
window=11
epsilon = .001
max_shots = 500
pol=2
verbosality = 10 # printing step for intermediate results when increasing the experiment shot number
# In this section you can easily make your choice of combinations:
# mitigation or not, metric, model
mitigation_dic = {"no":False, "yes" : True}
#mitigation_dic = {"no":False}
#mitigation_dic = {"yes" : True}
o_metrics_desired = ['jensenshannon', 'sqeuclidean']
#o_metrics_desired = ['jensenshannon']
#o_metrics_desired = ['sqeuclidean']
model_dic = {"empirical": PD_exper, "ideal_sim": PD_ideal}
#model_dic = {"empirical": PD_exper}
#model_dic = {"ideal_sim": PD_ideal}
# Obtain a sequence of results in form of a list of dictionaries
for mit_str, mitigation in mitigation_dic.items():
# here we toggle PD_exper as we toggled mitigation status
PD_exper = get_clean_matrix(empirical_dic, mitigation=mitigation,
m_filter=meas_filter)
PD_test = get_clean_matrix(test_dic, mitigation=mitigation,
m_filter=meas_filter)
for o_metric in o_metrics_desired:
print(project_name, model_dic.keys(), o_metric)
for model_name in model_dic.keys():
add_single_dic(empirical_data_list)
###Output
_____no_output_____
###Markdown
markdown for security: json_dic_dumper(empirical_data_list,'Tnemp_data_list_'+project_name)
###Code
# get the stored results of the Monte Carlo simulation in case you skipped the previous step
if len(empirical_data_list) == 0:
empirical_data_list = json_dic_loader('Nemp_data_list_'+project_name)
# have a look at the mean error rate curves and error rate at save shot number n_s
# NB the r_hat_mean curves and legend reported r_hat_max errors are the unsmoothed values
opl.plot_curves(ideal_data_list + empirical_data_list,
np.array(range(2+len(empirical_data_list))),
"$\epsilon=0.001$" , ["device"],
["model","metric","mitigation","id_gates"],
right_xlimit = 80, bottom_ylimit = -0.02, top_ylimit = 1)
import winsound
duration = 2000 # milliseconds
freq = 800 # Hz
winsound.Beep(freq, duration)
import qiskit.tools.jupyter
%qiskit_version_table
###Output
_____no_output_____ |
examples/Jupyter_notebook_examples/model_to_scripts/model2scripts.ipynb | ###Markdown
1. Define a model
###Code
p = Process("process")
p_con = p.Condition("Condition 1")
p1 = p_con.Process("process 1")
p2 = p_con.Process("process 2")
p_act1 = p1.Action("Action 1")
p_act2 = p2.Action("Action 2")
p_act3 = p.Action("Action 3")
show(p, width=400, height=400)
###Output
_____no_output_____
###Markdown
2. Generate a script from the model
###Code
sg = ScriptGenerator()
sg.run(p)
print(sg.script)
###Output
ID2014082256776 = Process('process')
ID2014082256712 = ID2014082256776.Condition('Condition 1')
ID2014082196360 = ID2014082256712.Process('process 1')
ID2014083412872 = ID2014082196360.Action('Action 1')
ID2014083412552 = ID2014082256712.Process('process 2')
ID2014083413192 = ID2014083412552.Action('Action 2')
ID2014083413448 = ID2014082256776.Action('Action 3')
|
jupyter-notebooks/ParameterSpace extraction from data.ipynb | ###Markdown
This notebook shows the functions provided by TINC to create parameter spaces and data pools from configuration and output files. There are two methods: * Using the output data * Using configuration files Using the output can be very convenient as you can create a DataPool automatically to explore the data, but there are cases where you might need to extract the parameter space from configuration files instead, in cases where the parameters are not present in the output data.
###Code
from tinc import *
%pylab inline
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
tl;dr Create datapools from output. You can create a data pool directly from results using ```create_datapool_from_output()```. This function returns the datapool and parameter space extracted from the data files.
###Code
data_dir = r'C:\Users\Andres\source\repos\vdv_data\nbO_2chempot'
dp,ps = create_datapool_from_output(data_dir, "results.json", ignore_params=['Beta'], debug = False)
figure(figsize=(12,4))
subplot(1,2, 1)
xlabel("T")
ps.get_dimension("param_chem_pot(a)").value = 3.8
plot(ps.get_parameter("T").values, dp.get_slice("<formation_energy>", "T"))
ps.get_parameter("param_chem_pot(a)").value = 3.9
plot(ps.get_parameter("T").values, dp.get_slice("<formation_energy>", "T"))
legend([3.8, 3.9])
subplot(1,2, 2)
xlabel("param_chem_pot(a)")
temp_values = linspace(100, 2800, 5)
for temp_val in temp_values:
ps.get_dimension("T").value = temp_val
plot(ps.get_parameter("param_chem_pot(a)").get_values(), dp.get_slice("<formation_energy>", "param_chem_pot(a)"), marker='o')
legend(temp_values);
###Output
Found more than one potential parameter in files:['Beta', 'T']
Using:T
###Markdown
Extracting from configuration filesTo extract a parameter space from configuration files, you must provide a data root directory and the name of the configuration file that can be found in subdirectories. This assumes all configuration file names are the same.You will also need to describe how to extract the information from the configuration files. This is done by specifying the keys where the parameter data is found. For example if the configuration files look like:```json{ "driver" : { "mode" : "incremental", "motif" : { "configname" : "restricted_auto", "_configname" : "SCEL1_1_1_1_0_0_0/0", "_configdof" : "$HOME/laptop_share/NbO_rocksalt_gs/mc_runs/fit_13.02/coarse_grid/set2_cooling_grid2/A_3.9B_-19.1/conditions.298/tto/final_state.json" }, "initial_conditions" : { "param_chem_pot" : { "a" : 3.90, "b" : -19.80 }, "temperature" : 20.0, "tolerance" : 0.001 }, "final_conditions" : { "param_chem_pot" : { "a" : 3.90, "b" : -19.80 }, "temperature" :2800.0, "tolerance" : 0.001 }, "incremental_conditions" : { "param_chem_pot" : { "a" : 0.0, "b" : 0.0 }, "temperature" : 10.0, "tolerance" : 0.001 } }}```You specify the starting value key as: ```driver/initial_conditions/*``` because the starting values are a list within the "driver" and " initial_conditions" keys. A similar string needs to be constructed for end and increment keys.Current limitations: * JSON only * space must be described by its boundaries and the incremement * Limited format to describe how to extract the information. Currently values must be leaf nodes. Extracting parameter space valuesThe function ```extract_parameter_space_data``` will extract the parameter values as a dictionary. This can be useful as a initial step to ensure values are being extracted correctly.
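A minimal sketch (an illustration, not part of the TINC API) of how a key path such as `driver/initial_conditions/*` can be resolved against the JSON above; the function name and the exact handling of `*` are assumptions made here for clarity:
```python
def resolve_key_path(config: dict, path: str):
    # walk the '/'-separated keys; '*' means "return everything under this node"
    node = config
    for key in path.split('/'):
        if key == '*':
            return node
        node = node[key]
    return node

# e.g. resolve_key_path(settings, "driver/initial_conditions/*") would return
# {"param_chem_pot": {"a": 3.9, "b": -19.8}, "temperature": 20.0, "tolerance": 0.001}
```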
###Code
data_dir = r'C:\Users\Andres\source\repos\vdv_data\nbO_2chempot'
config_file = 'mc_settings.json'
parameter_start_key = 'driver/initial_conditions/*'
parameter_end_key = 'driver/final_conditions/*'
parameter_increment_key = 'driver/incremental_conditions/*'
extract_parameter_space_data(data_dir, config_file, parameter_start_key, parameter_end_key, parameter_increment_key)
###Output
_____no_output_____
###Markdown
Creating parameter spaces. The ```make_parameter_space``` function returns a fully created parameter space from the configuration files. The only remaining step to make the parameter space usable is to set the path template using ```set_current_path_template```. This template describes how the parameter values map to the filesystem.
###Code
data_dir = r'C:\Users\Andres\source\repos\vdv_data\nbO_2chempot'
config_file = 'mc_settings.json'
parameter_start_key = 'driver/initial_conditions/*'
parameter_end_key = 'driver/final_conditions/*'
parameter_increment_key = 'driver/incremental_conditions/*'
ps = make_parameter_space(data_dir, config_file, parameter_start_key, parameter_end_key, parameter_increment_key, ps_name="casmParams")
ps.set_current_path_template("A_%%param_chem_pot(a)%%B_%%param_chem_pot(b)%%")
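# Assumption for illustration: with param_chem_pot(a)=3.9 and param_chem_pot(b)=-19.8,
# the %%...%% placeholders above are expected to resolve to the subdirectory "A_3.9B_-19.8".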
ps.print()
###Output
_____no_output_____
###Markdown
Data pools for the new parameter space. Once the parameter space has been extracted, a DataPool can be created to access the data across all the directories. After creating the data pool, you need to register the files that contain the data; in this case, the "results.json" file spans the temperature parameter. These data files must be located in the path defined through ```set_current_path_template()``` above.
###Code
dp = DataPoolJson("results", ps, "slice_dir")
dp.register_data_file("results.json", "temperature")
dp.get_current_files()
ps.get_current_relative_path()
###Output
_____no_output_____
###Markdown
You can query the fields available in the data files:
###Code
dp.list_fields()
ps.get_dimension("temperature").value = 1500
ps.get_dimension("param_chem_pot(b)").value = -19.9
###Output
_____no_output_____
###Markdown
You can request slices of data, from a single data file (temperature is the parameter contained in the individual files):
###Code
#dp.debug = True
ps.get_dimension("param_chem_pot(a)").value = 3.8
plot(ps.get_parameter("temperature").values, dp.get_slice("<formation_energy>", "temperature"))
ps.get_parameter("param_chem_pot(a)").value = 3.9
plot(ps.get_parameter("temperature").values, dp.get_slice("<formation_energy>", "temperature"))
legend(ps.get_dimension("param_chem_pot(a)").values)
len(dp.get_slice("<formation_energy>", "temperature")), len(ps.get_parameter('temperature').values)
###Output
_____no_output_____
###Markdown
Or you can request slices that take a single value from a number of results file, when the requested slicing dimension is a dimension that affects the current path:
###Code
ps.get_parameter("param_chem_pot(a)").values
dp.get_slice("<formation_energy>", "param_chem_pot(a)")
ps._path_template
temp_values = linspace(100, 2800, 10)
for temp_val in temp_values:
ps.get_dimension("temperature").value = temp_val
plot(ps.get_parameter("param_chem_pot(a)").values, dp.get_slice("<formation_energy>", "param_chem_pot(a)"), marker='o')
legend(temp_values);
len(dp.get_slice("<formation_energy>", "param_chem_pot(a)")), len(ps.get_parameter('param_chem_pot(a)').values)
###Output
_____no_output_____
###Markdown
Making parameter space from output files. You can also extract parameter spaces from output files, to analyze output data, or when the input parameters are available in output data files. To do this, you must provide a root path and define a function that reads a data file and returns a dictionary with the possible values each parameter can take according to the data file. The format should be the same as provided by the ```extract_parameter_space_data``` function above.
###Code
def read_func(path):
with open(path) as f:
j = json.load(f)
return j
data_dir = r'C:\Users\Andres\source\repos\vdv_data\nbO_2chempot'
ps = extract_parameter_space_from_output(data_dir, "results.json", read_func)
ps.print()
###Output
_____no_output_____
###Markdown
If there are two potential parameters inside the result files, the first one found will be used. You can instruct which ones to ignore using the ```ignore_params=``` argument:
###Code
ps = extract_parameter_space_from_output(data_dir, "results.json", read_func, ignore_params=['Beta'])
ps.print()
###Output
_____no_output_____
###Markdown
Creating Datapools from output You can create a data pool directly from results using ```create_datapool_from_output()```. This function outputs the datapool and parameter space extracted from the data files.
###Code
dp,ps = create_datapool_from_output(data_dir, "results.json", read_func, ignore_params=['Beta'])
ps.get_parameter("param_chem_pot(b)").value, ps.get_parameter("param_chem_pot(b)").values, ps.get_parameter("param_chem_pot(b)").ids, ps.get_parameter("param_chem_pot(b)").get_space_stride()
dp.list_fields()
ps = dp.get_parameter_space()
ps.get_current_relative_path()
ps.get_root_path()
#dp.debug = True
plot(ps.get_parameter("T").get_values(), dp.get_slice("<formation_energy>", "T"));
ps.get_parameter("param_chem_pot(a)").ids
ps.get_current_relative_path()
ps.is_filesystem_dimension("param_chem_pot(a)")
ps.get_parameter("param_chem_pot(a)").values
ps.get_common_id([ps.get_parameter("param_chem_pot(a)"),ps.get_parameter("param_chem_pot(b)")], {'T': 0, 'param_chem_pot(a)': 0, 'param_chem_pot(b)': 0})
ps.is_filesystem_dimension('param_chem_pot(a)')
ps.resolve_template(ps._path_template, {'T': 0, 'param_chem_pot(a)': 3, 'param_chem_pot(b)': 0})
dp.get_slice("Beta", "param_chem_pot(a)")
set(ps.get_parameter("param_chem_pot(a)").values)
plot(ps.get_parameter("param_chem_pot(a)").get_values(), dp.get_slice("Beta", "param_chem_pot(a)"), marker='o')
###Output
_____no_output_____
###Markdown
TODO... Ignore below...
###Code
import netCDF4
ps_filename = "parameter_space.nc"
sub_dir = sub_dirs[0]
full_path = data_dir + subdir + ps_filename
ps_file = netCDF4.Dataset(full_path, "w", format="NETCDF4")
params = ps_file.createGroup("internal_parameters")
mapped_params = ps_file.createGroup("mapped_parameters")
index_params = ps_file.createGroup("index_parameters")
for param_name, space in param_space.items():
param_group = rootgrp.createVariable("values","f8",("internal_parameters",))
mapped_group = rootgrp.createVariable("values","f8",("mapped_parameters",))
mapped_var_ids = rootgrp.createVariable("ids","s",("mapped_parameters",))
index_group = rootgrp.createVariable("values","f8",("index_parameters",))
param_var = rootgrp.createVariable("values","f8",("internal_parameters",))
mapped_var = rootgrp.createVariable("values","f8",("mapped_parameters",))
mapped_var_ids = rootgrp.createVariable("ids","s",("mapped_parameters",))
index_params = rootgrp.createVariable("values","f8",("index_parameters",))
ps_file.close()
###Output
_____no_output_____
###Markdown
Testing single filesystem parameter:
###Code
data_dir = r'C:\Users\Andres\source\repos\vdv_data\MonteCarlo_0'
dp,ps = create_datapool_from_output(data_dir, "results.json", ignore_params=["Beta"] )
ps = dp.get_parameter_space()
ps.print()
ps._path_template
ps.get_current_relative_path()
dp.list_fields()
dp.get_slice("<formation_energy>", "T")
dp.get_slice("<formation_energy>", "param_chem_pot(a)")
ps.is_filesystem_dimension("T")
###Output
_____no_output_____ |
examples/reference/widgets/RangeSlider.ipynb | ###Markdown
The ``RangeSlider`` widget allows selecting a floating-point range using a slider with two handles.For more information about listening to widget events and laying out widgets refer to the [widgets user guide](../../user_guide/Widgets.ipynb). Alternatively you can learn how to build GUIs by declaring parameters independently of any specific widgets in the [param user guide](../../user_guide/Param.ipynb). To express interactivity entirely using Javascript without the need for a Python server take a look at the [links user guide](../../user_guide/Param.ipynb). Parameters:For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb). Core* **``start``** (float): The range's lower bound* **``end``** (float): The range's upper bound* **``step``** (float): The interval between values* **``value``** (tuple): Tuple of upper and lower bounds of selected range* **``value_throttled``** (tuple): Tuple of upper and lower bounds of selected range where events are throttled by `callback_throttle` value. Display* **``bar_color``** (color): Color of the slider bar as a hexadecimal RGB value* **``callback_policy``** (str, **DEPRECATED**): Policy to determine when slider events are triggered (one of 'continuous', 'throttle', 'mouseup')* **``callback_throttle``** (int, **DEPRECATED**): Number of milliseconds to pause between callback calls as the slider is moved* **``direction``** (str): Whether the slider should go from left to right ('ltr') or right to left ('rtl')* **``disabled``** (boolean): Whether the widget is editable* **``name``** (str): The title of the widget* **``orientation``** (str): Whether the slider should be displayed in a 'horizontal' or 'vertical' orientation.* **``tooltips``** (boolean): Whether to display tooltips on the slider handle___
###Code
range_slider = pn.widgets.RangeSlider(
name='Range Slider', start=0, end=math.pi, value=(math.pi/4., math.pi/2.), step=0.01)
range_slider
###Output
_____no_output_____
###Markdown
``RangeSlider.value`` returns a tuple of float values that can be read out and set like other widgets:
###Code
range_slider.value
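# The value can also be set programmatically, e.g. range_slider.value = (0.5, 1.5)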
###Output
_____no_output_____
###Markdown
ControlsThe `RangeSlider` widget exposes a number of options which can be changed from both Python and Javascript. Try out the effect of these parameters interactively:
###Code
pn.Row(range_slider.controls(jslink=True), range_slider)
###Output
_____no_output_____
###Markdown
The ``RangeSlider`` widget allows selecting a floating-point range using a slider with two handles.For more information about listening to widget events and laying out widgets refer to the [widgets user guide](../../user_guide/Widgets.ipynb). Alternatively you can learn how to build GUIs by declaring parameters independently of any specific widgets in the [param user guide](../../user_guide/Param.ipynb). To express interactivity entirely using Javascript without the need for a Python server take a look at the [links user guide](../../user_guide/Param.ipynb). Parameters:For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb). Core* **``start``** (float): The range's lower bound* **``end``** (float): The range's upper bound* **``step``** (float): The interval between values* **``value``** (tuple): Tuple of upper and lower bounds of selected range* **``value_throttled``** (tuple): Tuple of upper and lower bounds of selected range throttled until mouseup Display* **``bar_color``** (color): Color of the slider bar as a hexadecimal RGB value* **``direction``** (str): Whether the slider should go from left to right ('ltr') or right to left ('rtl')* **``disabled``** (boolean): Whether the widget is editable* **``format``** (str, bokeh.models.TickFormatter): Formatter to apply to the slider value* **``name``** (str): The title of the widget* **``orientation``** (str): Whether the slider should be displayed in a 'horizontal' or 'vertical' orientation.* **``tooltips``** (boolean): Whether to display tooltips on the slider handle___
###Code
range_slider = pn.widgets.RangeSlider(
name='Range Slider', start=0, end=math.pi, value=(math.pi/4., math.pi/2.), step=0.01)
range_slider
###Output
_____no_output_____
###Markdown
``RangeSlider.value`` returns a tuple of float values that can be read out and set like other widgets:
###Code
range_slider.value
###Output
_____no_output_____
###Markdown
A custom format string or bokeh TickFormatter may be used to format the slider values:
###Code
from bokeh.models.formatters import PrintfTickFormatter
str_format = pn.widgets.RangeSlider(name='Distance', format='0.0a', start=100000, end=1000000)
tick_format = pn.widgets.RangeSlider(name='Distance', format=PrintfTickFormatter(format='%.3f m'))
pn.Column(str_format, tick_format)
###Output
_____no_output_____
###Markdown
ControlsThe `RangeSlider` widget exposes a number of options which can be changed from both Python and Javascript. Try out the effect of these parameters interactively:
###Code
pn.Row(range_slider.controls(jslink=True), range_slider)
###Output
_____no_output_____
###Markdown
The ``RangeSlider`` widget allows selecting a floating-point range using a slider with two handles.For more information about listening to widget events and laying out widgets refer to the [widgets user guide](../../user_guide/Widgets.ipynb). Alternatively you can learn how to build GUIs by declaring parameters independently of any specific widgets in the [param user guide](../../user_guide/Param.ipynb). To express interactivity entirely using Javascript without the need for a Python server take a look at the [links user guide](../../user_guide/Param.ipynb). Parameters:For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb). Core* **``start``** (float): The range's lower bound* **``end``** (float): The range's upper bound* **``step``** (float): The interval between values* **``value``** (tuple): Tuple of upper and lower bounds of selected range* **``value_throttled``** (tuple): Tuple of upper and lower bounds of selected range where events are throttled by `callback_throttle` value. Display* **``bar_color``** (color): Color of the slider bar as a hexadecimal RGB value* **``direction``** (str): Whether the slider should go from left to right ('ltr') or right to left ('rtl')* **``disabled``** (boolean): Whether the widget is editable* **``format``** (str, bokeh.models.TickFormatter): Formatter to apply to the slider value* **``name``** (str): The title of the widget* **``orientation``** (str): Whether the slider should be displayed in a 'horizontal' or 'vertical' orientation.* **``tooltips``** (boolean): Whether to display tooltips on the slider handle___
###Code
range_slider = pn.widgets.RangeSlider(
name='Range Slider', start=0, end=math.pi, value=(math.pi/4., math.pi/2.), step=0.01)
range_slider
###Output
_____no_output_____
###Markdown
``RangeSlider.value`` returns a tuple of float values that can be read out and set like other widgets:
###Code
range_slider.value
###Output
_____no_output_____
###Markdown
A custom format string or bokeh TickFormatter may be used to format the slider values:
###Code
from bokeh.models.formatters import PrintfTickFormatter
str_format = pn.widgets.RangeSlider(name='Distance', format='0.0a', start=100000, end=1000000)
tick_format = pn.widgets.RangeSlider(name='Distance', format=PrintfTickFormatter(format='%.3f m'))
pn.Column(str_format, tick_format)
###Output
_____no_output_____
###Markdown
ControlsThe `RangeSlider` widget exposes a number of options which can be changed from both Python and Javascript. Try out the effect of these parameters interactively:
###Code
pn.Row(range_slider.controls(jslink=True), range_slider)
###Output
_____no_output_____
###Markdown
The ``RangeSlider`` widget allows selecting a floating-point range using a slider with two handles.For more information about listening to widget events and laying out widgets refer to the [widgets user guide](../../user_guide/Widgets.ipynb). Alternatively you can learn how to build GUIs by declaring parameters independently of any specific widgets in the [param user guide](../../user_guide/Param.ipynb). To express interactivity entirely using Javascript without the need for a Python server take a look at the [links user guide](../../user_guide/Param.ipynb). Parameters:For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb). Core* **``start``** (float): The range's lower bound* **``end``** (float): The range's upper bound* **``step``** (float): The interval between values* **``value``** (tuple): Tuple of upper and lower bounds of selected range Display* **``bar_color``** (color): Color of the slider bar as a hexadecimal RGB value* **``callback_policy``** (str): Policy to determine when slider events are triggered (one of 'continuous', 'throttle', 'mouseup')* **``callback_throttle``** (int): Number of milliseconds to pause between callback calls as the slider is moved* **``direction``** (str): Whether the slider should go from left to right ('ltr') or right to left ('rtl')* **``disabled``** (boolean): Whether the widget is editable* **``name``** (str): The title of the widget* **``orientation``** (str): Whether the slider should be displayed in a 'horizontal' or 'vertical' orientation.* **``tooltips``** (boolean): Whether to display tooltips on the slider handle___
###Code
range_slider = pn.widgets.RangeSlider(
name='Range Slider', start=0, end=math.pi, value=(math.pi/4., math.pi/2.), step=0.01)
range_slider
###Output
_____no_output_____
###Markdown
``RangeSlider.value`` returns a tuple of float values that can be read out and set like other widgets:
###Code
range_slider.value
###Output
_____no_output_____
###Markdown
The ``RangeSlider`` widget allows selecting a floating-point range using a slider with two handles.For more information about listening to widget events and laying out widgets refer to the [widgets user guide](../../user_guide/Widgets.ipynb). Alternatively you can learn how to build GUIs by declaring parameters independently of any specific widgets in the [param user guide](../../user_guide/Param.ipynb). To express interactivity entirely using Javascript without the need for a Python server take a look at the [links user guide](../../user_guide/Param.ipynb). Parameters:For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb). Core* **``start``** (float): The range's lower bound* **``end``** (float): The range's upper bound* **``step``** (float): The interval between values* **``value``** (tuple): Tuple of upper and lower bounds of selected range* **``value_throttled``** (tuple): Tuple of upper and lower bounds of selected range where events are throttled by `callback_throttle` value. Display* **``bar_color``** (color): Color of the slider bar as a hexadecimal RGB value* **``callback_policy``** (str, **DEPRECATED**): Policy to determine when slider events are triggered (one of 'continuous', 'throttle', 'mouseup')* **``callback_throttle``** (int): Number of milliseconds to pause between callback calls as the slider is moved* **``direction``** (str): Whether the slider should go from left to right ('ltr') or right to left ('rtl')* **``disabled``** (boolean): Whether the widget is editable* **``name``** (str): The title of the widget* **``orientation``** (str): Whether the slider should be displayed in a 'horizontal' or 'vertical' orientation.* **``tooltips``** (boolean): Whether to display tooltips on the slider handle___
###Code
range_slider = pn.widgets.RangeSlider(
name='Range Slider', start=0, end=math.pi, value=(math.pi/4., math.pi/2.), step=0.01)
range_slider
###Output
_____no_output_____
###Markdown
``RangeSlider.value`` returns a tuple of float values that can be read out and set like other widgets:
###Code
range_slider.value
###Output
_____no_output_____ |
recurrent-neural-network-lstm.ipynb | ###Markdown
Import Libraries
###Code
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Dense, Dropout, LSTM
from sklearn.metrics import mean_absolute_error
from tensorflow.keras import layers
from datetime import datetime
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
###Output
_____no_output_____
###Markdown
Import data
###Code
crypto_df = pd.read_csv("../input/g-research-crypto-forecasting/train.csv")
crypto_df.head()
asset_details = pd.read_csv('../input/g-research-crypto-forecasting/asset_details.csv')
asset_details
# Select Asset_ID = 6 for Ethereum
crypto_df = crypto_df[crypto_df["Asset_ID"]==6]
crypto_df.info(show_counts =True)
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 1956200 entries, 5 to 24236799
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 timestamp 1956200 non-null int64
1 Asset_ID 1956200 non-null int64
2 Count 1956200 non-null float64
3 Open 1956200 non-null float64
4 High 1956200 non-null float64
5 Low 1956200 non-null float64
6 Close 1956200 non-null float64
7 Volume 1956200 non-null float64
8 VWAP 1956200 non-null float64
9 Target 1955860 non-null float64
dtypes: float64(8), int64(2)
memory usage: 164.2 MB
###Markdown
Preprocess data
###Code
df = crypto_df.copy()
# fill missing values
df = df.reindex(range(df.index[0],df.index[-1]+60,60),method='pad')
df = df.fillna(0)
# rename column timestamp to Date
df.rename({'timestamp': 'Date'}, axis=1, inplace=True)
# rename Close to Price
df.rename(columns={'Close': 'Price'}, inplace=True)
# timestamp conversion
df.Date = df.Date.apply(lambda d: datetime.fromtimestamp(int(d)).strftime('%Y-%m-%d'))
# set index
df.set_index('Date', inplace=True)
df.head()
# Convert to date array
timesteps = df.index.to_numpy()
prices = df['Price'].to_numpy()
timesteps[:10], prices[:10]
###Output
_____no_output_____
###Markdown
Modeling: Recurrent Neural Network LSTM
###Code
HORIZON = 1
WINDOW_SIZE = 7
# Function to create labelled window data
def get_labelled_windows(x, horizon=1):
return x[:, :-horizon], x[:, -horizon:]
# Test the window labelling function
test_window, test_label = get_labelled_windows(tf.expand_dims(tf.range(8)+1, axis=0), horizon=HORIZON)
print(f"Window: {tf.squeeze(test_window).numpy()} -> Label: {tf.squeeze(test_label).numpy()}")
# Function to view NumPy arrays as windows
def make_windows(x, window_size=7, horizon=1):
  # row vector of offsets covering one full window plus its label: [0, 1, ..., window_size+horizon-1]
  window_step = np.expand_dims(np.arange(window_size+horizon), axis=0)
  # add a column of start positions to get a 2D array of indexes, one row per possible window
  window_indexes = window_step + np.expand_dims(np.arange(len(x)-(window_size+horizon-1)), axis=0).T
  # use the 2D indexes to slice x into overlapping windows, then split off the labels
  windowed_array = x[window_indexes]
  windows, labels = get_labelled_windows(windowed_array, horizon=horizon)
  return windows, labels
full_windows, full_labels = make_windows(prices, window_size=WINDOW_SIZE, horizon=HORIZON)
len(full_windows), len(full_labels)
# View the first 3 windows/labels
for i in range(3):
print(f"Window: {full_windows[i]} -> Label: {full_labels[i]}")
# View the last 3 windows/labels
for i in range(3):
print(f"Window: {full_windows[i-3]} -> Label: {full_labels[i-3]}")
# Function to create train-test-splits
def make_train_test_splits(windows, labels, test_split=0.2):
split_size = int(len(windows) * (1-test_split))
train_windows = windows[:split_size]
train_labels = labels[:split_size]
test_windows = windows[split_size:]
test_labels = labels[split_size:]
return train_windows, test_windows, train_labels, test_labels
train_windows, test_windows, train_labels, test_labels = make_train_test_splits(full_windows, full_labels)
len(train_windows), len(test_windows), len(train_labels), len(test_labels)
train_windows[:5], train_labels[:5]
import os
# Function to implement a ModelCheckpoint callback with a specific filename
def create_model_checkpoint(model_name, save_path="model_experiments"):
return tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(save_path, model_name),
verbose=0,
save_best_only=True)
tf.random.set_seed(42)
# LSTM model with the Functional API
inputs = layers.Input(shape=(WINDOW_SIZE,))  # (WINDOW_SIZE,) as an explicit 1-tuple
x = layers.Lambda(lambda x: tf.expand_dims(x, axis=1))(inputs)
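# expand_dims reshapes each (batch, WINDOW_SIZE) input to (batch, 1, WINDOW_SIZE),
# so the LSTM processes a single timestep whose features are the WINDOW_SIZE past prices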
x = layers.LSTM(128, activation="relu")(x)
output = layers.Dense(HORIZON)(x)
lstm_model = tf.keras.Model(inputs=inputs, outputs=output, name="model_5_lstm")
# Compile model
lstm_model.compile(loss="mae",
optimizer=tf.keras.optimizers.Adam())
# Fit the model
lstm_model.fit(train_windows,
train_labels,
epochs=100,
verbose=0,
batch_size=128,
validation_data=(test_windows, test_labels),
callbacks=[create_model_checkpoint(model_name=lstm_model.name)])
# Load in best version of the LSTM model
lstm_model = tf.keras.models.load_model("model_experiments/model_5_lstm/")
lstm_model.evaluate(test_windows, test_labels)
def make_preds(model, input_data):
forecast = model.predict(input_data)
return tf.squeeze(forecast)
# Make predictions with our LSTM model
model_lstm_preds = make_preds(lstm_model, test_windows)
model_lstm_preds[:10]
###Output
_____no_output_____
###Markdown
Model Evaluation
###Code
def evaluate_preds(y_true, y_pred):
# Make sure float32 (for metric calculations)
y_true = tf.cast(y_true, dtype=tf.float32)
y_pred = tf.cast(y_pred, dtype=tf.float32)
# Calculate various metrics
mae = tf.keras.metrics.mean_absolute_error(y_true, y_pred)
mse = tf.keras.metrics.mean_squared_error(y_true, y_pred)
rmse = tf.sqrt(mse)
mape = tf.keras.metrics.mean_absolute_percentage_error(y_true, y_pred)
return {"mae": mae.numpy(),
"mse": mse.numpy(),
"rmse": rmse.numpy(),
"mape": mape.numpy()}
# Evaluate LSTM model
model_lstm_results = evaluate_preds(y_true=tf.squeeze(test_labels),
y_pred=model_lstm_preds)
model_lstm_results
###Output
_____no_output_____ |
4a_GenerateRASA_Conversational_Data.ipynb | ###Markdown
Script - **Input:** FAQ generated in 2b and listing entities generated in 2d. - **Output:** auto-generated nlu.yml for the RASA solution (approx. 300K intents).
###Code
root = './RASA_ConceptNet5/data/'
processed = './Data/processing/Processed_Airbnb/'
test = './Data/test'
###Output
_____no_output_____
###Markdown
Import
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
Generate nlu.yml Gather FAQs
###Code
FAQ1 = pd.read_csv(processed+'FAQ_1.json')
FAQ2 = pd.read_csv(processed+'FAQ_2.json')
FAQ3 = pd.read_csv(processed+'FAQ_3.json')
FAQ = pd.concat([FAQ1, FAQ2, FAQ3], axis=0)
FAQ.head(1)
###Output
_____no_output_____
###Markdown
Add intent type
###Code
FAQ['intent'] = 'affirm_FAQ'
FAQ['type'] = 'intent'
FAQ.head(5)
###Output
_____no_output_____
###Markdown
Append entity and entity value to the intent.
###Code
#Remove ? from the question
#Append entity value and entity at the end and append '?' => [answer](wordid)?
FAQ['question'] = FAQ['question'].str.replace(r'\?', ' ', regex=True)  # escape '?' so it is not treated as a regex quantifier
FAQ.head(5)
# FAQ['question'] = FAQ['question'].astype(str) +'['+FAQ['answer'].astype(str)+'](wordid)?'
FAQ['question'] = FAQ['question'].astype(str)
FAQ.head(5)
###Output
_____no_output_____
###Markdown
Auto-generate NLU intents
###Code
nlus = FAQ.groupby('intent')['question'].apply(list).to_dict()
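# nlus maps each intent name to the list of its example questions,
# e.g. {"affirm_FAQ": ["question text 1", "question text 2", ...]}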
answer = FAQ["answer"].unique()
nlu=[]
query=[]
entity=[]
for val in answer:
questionString1="find me close matches for a place near "
questionString2="get me recommendations with features close to"
questionString3="I am looking for a place with facilities similar to "
questionString4="get me recommendations with facilities similar to "
questionString5="find similar recommendations like a "
questionString6="find places with amenities similar to "
questionString7="find places suitable for "
n = val
nlu.append(questionString1+"["+str(n)+"](wordid)")
nlu.append(questionString2+"["+str(n)+"](wordid)")
nlu.append(questionString3+"["+str(n)+"](wordid)")
nlu.append(questionString4+"["+str(n)+"](wordid)")
nlu.append(questionString5+"["+str(n)+"](wordid)")
nlu.append(questionString6+"["+str(n)+"](wordid)")
query.append(questionString1+" "+str(n))
query.append(questionString2+" "+str(n))
query.append(questionString3+" "+str(n))
query.append(questionString4+" "+str(n))
query.append(questionString5+" "+str(n))
query.append(questionString6+" "+str(n))
query.append(questionString7+" "+str(n))
entity.append(str(n))
entity.append(str(n))
entity.append(str(n))
entity.append(str(n))
entity.append(str(n))
entity.append(str(n))
entity.append(str(n))
# #;TYPE;INTENT;VALUE
# f = open(root+"nlu.yml","a")
# for intent in nlus:
# f.write("\n- intent: {}\n".format(intent))
# f.write(" examples: |\n")
# for value in nlus[intent]:
# f.write(" - {}\n".format(value))
# f.write("\n- intent: {}\n".format("affirm_neo4j_conceptNet5_review"))
# f.write(" examples: |\n")
# for value in nlu:
# f.write(" - {}\n".format(value.lower()))
# f.close()
###Output
_____no_output_____
###Markdown
Generate listings nlu
###Code
import csv
listing_reviews = pd.read_csv(processed+'listings_text_processed.csv')
listing_reviews = listing_reviews[['neighbourhood','neighbourhood_cleansed','property_type'
,'room_type','accommodates','bathrooms','bathrooms_text'
,'bedrooms','beds','amenities','price']]
neighborhood = listing_reviews['neighbourhood'].unique()
loc = listing_reviews['neighbourhood'].unique()
accomodates = listing_reviews['accommodates'].unique()
bathrooms = listing_reviews['bathrooms'].unique()
bedrooms = listing_reviews['bedrooms'].unique()
beds = listing_reviews['beds'].unique()
price = listing_reviews['price'].unique()
bathrooms_text = listing_reviews['bathrooms_text'].unique()
property_type = listing_reviews['property_type'].unique()
room_type = listing_reviews['room_type'].unique()
amenities = listing_reviews['amenities'].unique()
nlu = []
for val in loc:
questionString1="find me a place near "
questionString2="get me recommendations with "
questionString3="I am looking for a place with facilities like "
questionString4="get me recommendations with facilities to "
if ',' in val:
neigh = val.split(',')
for n in neigh:
nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid)")
nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid)")
nlu.append(questionString3+"["+n.strip('\"').strip()+"](wordid)")
nlu.append(questionString4+"["+n.strip('\"').strip()+"](wordid)")
else:
n = val
nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid)")
nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid)")
nlu.append(questionString3+"["+n.strip('\"').strip()+"](wordid)")
nlu.append(questionString4+"["+n.strip('\"').strip()+"](wordid)")
query.append(questionString1+" "+str(n))
query.append(questionString2+" "+str(n))
query.append(questionString3+" "+str(n))
query.append(questionString4+" "+str(n))
entity.append(str(n))
entity.append(str(n))
entity.append(str(n))
entity.append(str(n))
for val in neighborhood:
questionString1="find me a place in the neighborhood of "
questionString2="find me a place in "
if ',' in val:
neigh = val.split(',')
for n in neigh:
nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid)")
nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid)")
else:
n = val
nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid)")
nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid)")
query.append(questionString1+" "+str(n))
query.append(questionString2+" "+str(n))
entity.append(str(n))
entity.append(str(n))
for val in accomodates:
questionString1="find me a place that can accomodate "
questionString2="find me a place for "
n = str(val)
nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid)")
nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid)")
query.append(questionString1+" "+str(n))
query.append(questionString2+" "+str(n))
entity.append(str(n))
entity.append(str(n))
for val in bathrooms:
questionString1="find me a place with "
questionString2="looking for a place with "
n = str(val)
nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid) bathrooms")
nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid) bathrooms")
query.append(questionString1+" "+str(n))
query.append(questionString2+" "+str(n))
entity.append(str(n))
entity.append(str(n))
for val in bedrooms:
questionString1="find me a place with "
questionString2="looking for a place with "
n = str(val)
nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid) bedrooms")
nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid) bedrooms")
query.append(questionString1+" "+str(n))
query.append(questionString2+" "+str(n))
entity.append(str(n))
entity.append(str(n))
for val in beds:
questionString1="find me a place with "
questionString2="looking for a place with "
n = str(val)
nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid) beds")
nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid) beds")
query.append(questionString1+" "+str(n))
query.append(questionString2+" "+str(n))
entity.append(str(n))
entity.append(str(n))
for val in price:
questionString1="find me a place in the range of "
questionString2="looking for a place within the price "
n = val
nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid)")
nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid)")
query.append(questionString1+" "+str(n))
query.append(questionString2+" "+str(n))
entity.append(str(n))
entity.append(str(n))
for val in bathrooms_text:
questionString1="find me a place with bath that is "
questionString2="looking for a place with bath features like "
n = str(val)
nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid)")
nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid)")
query.append(questionString1+" "+str(n))
query.append(questionString2+" "+str(n))
entity.append(str(n))
entity.append(str(n))
for val in amenities:
questionString1="find me a place with amenities like "
questionString2="I am looking for a place with amenities like "
val = val.strip('[').strip(']')
if ',' in val:
neigh = val.split(',')
for n in neigh:
nlu.append(questionString1+"["+n.replace('"', '').strip()+"](wordid)")
nlu.append(questionString2+"["+n.replace('"', '').strip()+"](wordid)")
else:
n = val
nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid)")
nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid)")
query.append(questionString1+" "+str(n))
query.append(questionString2+" "+str(n))
entity.append(str(n))
entity.append(str(n))
for val in property_type:
questionString1="find me a "
questionString2="looking for a "
if ',' in val:
neigh = val.split(',')
for n in neigh:
nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid) property")
nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid) property")
else:
n = val
nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid) property")
nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid) property")
query.append(questionString1+" "+str(n))
query.append(questionString2+" "+str(n))
entity.append(str(n))
entity.append(str(n))
for val in room_type:
questionString1="find me a "
questionString2="looking for a"
if ',' in val:
neigh = val.split(',')
for n in neigh:
nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid) room")
nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid) room")
else:
n = val
nlu.append(questionString1+"["+n.strip('\"').strip()+"](wordid) room")
nlu.append(questionString2+"["+n.strip('\"').strip()+"](wordid) room")
query.append(questionString1+" "+str(n))
query.append(questionString2+" "+str(n))
entity.append(str(n))
entity.append(str(n))
# #;TYPE;INTENT;VALUE
# f = open(root+"nlu.yml","a")
# f.write("\n- intent: {}\n".format("affirm_neo4j_conceptNet5_listing"))
# f.write(" examples: |\n")
# for value in nlu:
# f.write(" - {}\n".format(value.lower()))
# f.close()
df = pd.DataFrame(list(zip(query, entity)),
columns =['user_query', 'entity'])
df
df.to_csv(test+'/test_user_queries.csv')
###Output
_____no_output_____ |
examples/double_dice.ipynb | ###Markdown
The double dice problem. This notebook demonstrates a way of doing simple Bayesian updates using the table method, with a Pandas DataFrame as the table. Copyright 2018 Allen Downey. MIT License: https://opensource.org/licenses/MIT
###Code
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
import numpy as np
import pandas as pd
from fractions import Fraction
###Output
_____no_output_____
###Markdown
The BayesTable class. Here's the class that represents a Bayesian table.
###Code
class BayesTable(pd.DataFrame):
def __init__(self, hypo, prior=1, **options):
columns = ['hypo', 'prior', 'likelihood', 'unnorm', 'posterior']
super().__init__(columns=columns, **options)
self.hypo = hypo
self.prior = prior
def mult(self):
self.unnorm = self.prior * self.likelihood
def norm(self):
nc = np.sum(self.unnorm)
self.posterior = self.unnorm / nc
return nc
def update(self):
self.mult()
return self.norm()
def reset(self):
return BayesTable(self.hypo, self.posterior)
###Output
_____no_output_____
###Markdown
The double dice problem. Suppose I have a box that contains one each of 4-sided, 6-sided, 8-sided, and 12-sided dice. I choose a die at random, and roll it twice without letting you see the die or the outcome. I report that I got the same outcome on both rolls. 1) What is the posterior probability that I rolled each of the dice? 2) If I roll the same die again, what is the probability that I get the same outcome a third time? **Solution** Here's a `BayesTable` that represents the four hypothetical dice.
###Code
hypo = [Fraction(sides) for sides in [4, 6, 8, 12]]
table = BayesTable(hypo)
###Output
_____no_output_____
###Markdown
Since we didn't specify prior probabilities, the default value is equal priors for all hypotheses. They don't have to be normalized, because we have to normalize the posteriors anyway. Now we can specify the likelihoods: if a die has `n` sides, the chance of getting the same outcome twice is `1/n`. So the likelihoods are:
###Code
table.likelihood = 1/table.hypo
table
###Output
_____no_output_____
###Markdown
Now we can use `update` to compute the posterior probabilities:
###Code
table.update()
table
table.posterior.astype(float)
###Output
_____no_output_____
###Markdown
The 4-sided die is most likely because you are more likely to get doubles on a 4-sided die than on a 6-, 8-, or 12-sided die. Part two: The second part of the problem asks for the (posterior predictive) probability of getting the same outcome a third time, if we roll the same die again. If the die has `n` sides, the probability of getting the same value again is `1/n`, which should look familiar. To get the total probability of getting the same outcome, we have to add up the conditional probabilities: ```P(n | data) * P(same outcome | n)``` The first term is the posterior probability; the second term is `1/n`.
###Code
total = 0
for _, row in table.iterrows():
total += row.posterior / row.hypo
total
###Output
_____no_output_____
###Markdown
This calculation is similar to the first step of the update, so we can also compute it by (1) creating a new table with the posteriors from `table`, (2) adding the likelihood of getting the same outcome a third time, and (3) computing the normalizing constant.
###Code
table2 = table.reset()
table2.likelihood = 1/table.hypo
table2
table2.update()
table2
###Output
_____no_output_____ |
Bears.ipynb | ###Markdown
Creating our own dataset from Google Images In this tutorial we will see how to easily create an image dataset through Google Images. Note: We will have to repeat these steps for any new category we want to Google (e.g. once for dogs and once for cats).
###Code
from fastai.vision import *
###Output
_____no_output_____
###Markdown
Create directory and upload urls file into your server. Choose an appropriate name for our labeled images. We can run these steps multiple times to create different labels.
###Code
!mkdir data
!mkdir data/bears
folder = 'black'
file = 'blk.txt'
folder = 'teddys'
file = 'ted.txt'
folder= 'brown'
file= 'brwnbears.txt'
path = Path('data/bears')
dest = path/folder
dest.mkdir(parents=True, exist_ok=True)
path = Path('data/bears')
###Output
_____no_output_____
###Markdown
Download images. Now we will need to download our images from their respective urls. fast.ai has a function that allows us to do just that. We just have to specify the urls filename as well as the destination folder, and this function will download and save all images that can be opened; if an image cannot be opened, it will not be saved. Let's download our images! Notice we can choose a maximum number of images to be downloaded. In this case we will not download all the urls. We will need to run this line once for every category.
###Code
download_images(path/file, dest, max_pics=200)
classes = ['teddys','brown','black']
for c in classes:
print(c)
verify_images(path/c, delete=True, max_size=500)
###Output
teddys
###Markdown
View data
###Code
np.random.seed(42)
data = ImageDataBunch.from_folder(path, train=".", valid_pct=0.2,
ds_tfms=get_transforms(), size=224, num_workers=4).normalize(imagenet_stats)
data.show_batch(rows=3, figsize=(7,8))
data.classes, data.c, len(data.train_ds), len(data.valid_ds)
###Output
_____no_output_____
###Markdown
Train model
###Code
learn = cnn_learner(data, models.resnet34, metrics=error_rate)
learn.fit_one_cycle(5)
learn.save('stage-1')
learn.unfreeze()
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(2, max_lr=slice(3e-5,3e-4))
learn.save('stage-2')
###Output
_____no_output_____
###Markdown
Interpretation
###Code
learn.load('stage-2')
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()
###Output
_____no_output_____
###Markdown
Putting your model in production. First things first, let's export the content of our Learner object for production:
###Code
learn.export()
defaults.device = torch.device('cpu')
img = open_image(path/'teddys'/'00000011.jpg')
img
learn = load_learner(path)
pred_class,pred_idx,outputs = learn.predict(img)
pred_class
###Output
_____no_output_____ |
module_2/module2.ipynb | ###Markdown
Module 2: From `datascience` to `pandas`. Why `pandas`? Like the `datascience` package you learned and used in Data 8, `pandas` is a Python library used for data manipulation and analysis. However, `datascience` was developed as a pedagogical tool for Data 8, intended to help students become familiar with Python syntax as well as the syntax associated with tabular data analysis. We decided to teach `datascience` first since its syntax is more intuitive and easier to use for students without much programming experience. On the other hand, `pandas` is an industrial-strength package that is used in most data analysis projects in the real world. Learning how to use `pandas` will also make your projects easier for other data scientists to understand and extend the reach of your projects. Now that you've completed Data 8, it's a good time to translate the functions you've learned to `pandas`. Throughout this notebook, we will go over `pandas` by showing similar functions in both the `datascience` syntax and the `pandas` syntax, as well as introduce you to some of the other functionality that `pandas` provides. We will first import the packages we need. The following 3 lines import `pandas`, `datascience`, and `numpy` respectively. Note that we import `pandas` as `pd`, which means that when we call functions in `pandas`, we will always reference them with `pd` first. In Data 8, you saw something similar when we imported `numpy` as `np`: functions like `np.mean` or `np.random.choice` are all from the `numpy` package.
###Code
import pandas as pd # This is how we import pandas into the environment. Typically, we use pd to refer to all pandas modules for short.
from datascience import *
import numpy as np
###Output
_____no_output_____
###Markdown
Creating a Table: Reading in a Dataset. Most of the time, the data we want to analyze will be in a separate file, typically a `.csv` file. In this case, we want to read the file in and convert it into a tabular format. Using the `datascience` package, we read the file in as a table with the function `Table.read_table(file_path)`. In the example below, the `baby.csv` file is in the same folder as this notebook, so the relative file path from this notebook to the csv file is just `baby.csv`.
###Code
# datascience
baby_tbl = Table.read_table("baby.csv")
baby_tbl.show(5)
###Output
_____no_output_____
###Markdown
The syntax for reading in csv files in `pandas` is almost identical. `pandas` has a specific function to read in csv files called `pd.read_csv(file_path)`, with the same relative file path as its argument. The `dataframe.head()` function will display the first 5 rows of the data frame by default. If you want to specify the number of rows displayed, you can use `dataframe.head(num_rows)`. Similarly, if you want to see the last few rows of the data frame, you can use `dataframe.tail(num_rows)`. Try it out for yourself!
###Code
# pandas
baby_df = pd.read_csv("baby.csv")
baby_df.head(5)
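# Illustrative addition (not in the original notebook): the text above mentions
# dataframe.tail(num_rows) without a worked example, so this sketch shows it with
# an explicit row count. It assumes baby.csv was read in successfully above.
baby_df.tail(3)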
###Output
_____no_output_____
###Markdown
Creating a Table from Scratch. Oftentimes, data in tabular format is preferred for analysis. But what if you already have the data, just not in tabular format? For example, if your data is scattered in arrays and lists, you can use it to build your own table from scratch. Using the `datascience` package, we can first create a blank table with a call to Table(), then add in the arrays as columns to this blank table as shown below.
###Code
# datascience
flowers_tbl = Table().with_columns(
'Number of petals', make_array(8, 34, 5),
'Name', make_array('lotus', 'sunflower', 'rose'),
'Color', make_array('pink', 'yellow', 'red')
)
flowers_tbl
###Output
_____no_output_____
###Markdown
There are multiple ways to do this in `pandas`. One way is intuitively very similar to how we did it with `datascience`. Here we use a dictionary object to represent the data -- don't worry if you're not familiar with them. We pass into the `data` argument `{"colname1": column1, "colname2": column2, ...}`. Notably, we build the `DataFrame` by its columns, using each list as a column and associating each column with its appropriate name.
###Code
# pandas - method 1
flowers_df = pd.DataFrame(data = {'Number of petals': [8, 34, 5],
'Name': ['lotus', 'sunflower', 'rose'],
'Color': ['pink', 'yellow', 'red']})
flowers_df
###Output
_____no_output_____
###Markdown
Another way to build the same table from scratch is by building it with rows. With this method, the data should be a list of lists, where each inner list contains the entries of one row. You might also notice that there is now a second argument, `columns`. Since we are passing in rows, we do not have the column names inside the `data` argument. This is why we use the `columns` argument to specify column names.
###Code
# pandas - method 2
flowers_df = pd.DataFrame(data = [[8, 'lotus', 'pink'],
[34, 'sunflower', 'yellow'],
[5, 'rose', 'red']],
columns = ['Number of petals', 'Name', 'Color'])
flowers_df
###Output
_____no_output_____
###Markdown
`pandas` datatypes: Series vs. Arrays. One of the primary data types we saw when analyzing tabular data with the `datascience` package is the array. In `datascience`, the columns of the tables consist of arrays. In `pandas`, there is a very similar, but slightly different data type called a `Series`. You can access the values of a column in a Table using the `tbl.column(column_name)` function as follows. When using `tbl.column`, the values of the selected column will be returned as an array.
###Code
## datascience
baby_tbl.column("Birth.Weight")
###Output
_____no_output_____
###Markdown
Similarly in `pandas`, you can access the values of a particular column by using `dataframe[column_name]`. The data frame object is introduced in the next section, for now you can just understand it as the `pandas` equivalent to a table. `dataframe[column_name]` will return a `Series` instead of an array.
###Code
## pandas
baby_df["Birth.Weight"]
###Output
_____no_output_____
###Markdown
A `Series` object is basically an array with indices for each data point. In the above example, the first element in the `Birth.Weight` column is the integer 120. The corresponding index is 0. If we want just the data as an array without the index, we can use the `Series.values` attribute.
###Code
baby_df["Birth.Weight"].values
###Output
_____no_output_____
###Markdown
DataFrames vs. Tables. The following is our standard `datascience` Table. It is basically a collection of arrays, with column names.
###Code
# datascience
baby_tbl.head()
###Output
_____no_output_____
###Markdown
A `pandas` `DataFrame` can be thought of as a collection of Series, all of which have the same index. The resulting `DataFrame` consists of columns where each column is a `Series` and each row has a unique index.
###Code
# pandas
baby_df.head()
###Output
_____no_output_____
###Markdown
The number of rows in the `Table` can be found as such:
###Code
# datascience
baby_tbl.num_rows
###Output
_____no_output_____
###Markdown
Similarly, for the number of columns:
###Code
# datascience
baby_tbl.num_columns
###Output
_____no_output_____
###Markdown
The number of rows and columns in a `DataFrame` can be accessed together using the `.shape` attribute. Notice that the index is not counted as a column.
###Code
# pandas
baby_df.shape
###Output
_____no_output_____
###Markdown
To get just the number of rows, we want the 0th element.
###Code
# pandas
baby_df.shape[0]
###Output
_____no_output_____
###Markdown
For just the number of columns, we want the 1st element.
###Code
# pandas
baby_df.shape[1]
###Output
_____no_output_____
###Markdown
Indices The row labels of a `DataFrame` are collectively called the index. It helps to identify each row. By default, the index values are the row numbers, with the first row having index 0.
###Code
# pandas
baby_df.head()
###Output
_____no_output_____
###Markdown
We can access the index of a `DataFrame` by calling `DataFrame.index`.
###Code
baby_df.index
###Output
_____no_output_____
###Markdown
That doesn't seem too meaningful. We can access the values of the index using `.values`.
###Code
# pandas
baby_df.index.values
###Output
_____no_output_____
###Markdown
In addition, we can set the index to whatever we want it to be. So, instead of index going from 0 to 1173, we can change it to go from 1 to 1174.
###Code
# pandas
baby_df.set_index(np.arange(1, 1175))
###Output
_____no_output_____
###Markdown
Let's look at another example.
###Code
flowers_df
###Output
_____no_output_____
###Markdown
The labels in an index of a `DataFrame` do not have to be integers; they can also be strings. We can also use one of the data columns as the index itself. Here is an example in which we are setting the index to be the `Name` column.
###Code
# pandas
flowers_df = flowers_df.set_index('Name')
flowers_df
###Output
_____no_output_____
###Markdown
Subsetting Data: Selecting Columns. Sometimes the entire dataset contains too many columns, and we are only interested in some of them. In these situations, we want to be able to select and display a subset of the columns from the original table. We discuss some of these methods below.
###Code
# datascience
# Selects the columns "Number of petals" and "Color".
flowers_tbl.select("Number of petals", "Name")
###Output
_____no_output_____
###Markdown
In `pandas`, there are many ways to achieve the same result. For one, we can use the function `loc`, shown below. The first argument of `loc` is which rows we want to select, and since we want all of the rows, just a colon ":" would indicate all rows. The second argument selects the columns we want. If we want more than one column, we need to pass in the column names as a list for the `loc` to return a dataframe object.
###Code
# pandas
# Selects all rows and the columns "Number of petals" and "Color".
flowers_df.loc[:,["Number of petals", "Color"]]
###Output
_____no_output_____
###Markdown
If you pass in a single column name as a list, it will still return a dataframe object with one column.
###Code
# pandas
# Selects all rows but only the column "Number of petals". Returns a DataFrame object.
flowers_df.loc[:,["Number of petals"]]
###Output
_____no_output_____
###Markdown
But if you pass in the column name as a string, pandas will recognize that you only have one column, and return a `Series` instead.
###Code
# pandas
# Selects all rows but only the column "Number of petals". Returns a series object.
flowers_df.loc[:,"Number of petals"]
###Output
_____no_output_____
###Markdown
Another way to subset data in `pandas` is to use `iloc`. Unlike `loc`, which accepts column names as arguments, `iloc` only accepts numerical indices as its arguments. The order of arguments remain the same, with the rows being the first argument and the columns being the second argument. Here the `0:1` denotes a range and means that we want all columns indexed 0 through 1. In Python, ranges are generally left inclusive and right exclusive (so that only column 0 is selected here).
###Code
# pandas
# Selects all rows and the 0th-1st column (not inclusive of the 1st column)
flowers_df.iloc[:,0:1]
###Output
_____no_output_____
###Markdown
As a side note, we can also subset tables' rows by selecting row indices. Since we've set the index of the flowers table to be the name of the flower, we can directly pass in the row indices as a list as the first argument. Note that here, `loc` is actually left and right inclusive.
###Code
# pandas
# Selects the rows with index values "sunflower" and "lotus", and all columns
flowers_df.loc[["sunflower", "lotus"], :]
# pandas
# Selects the rows with index values from 0 to 3 (inclusive of 0 and 3), and all columns
baby_df.loc[0:3,:]
###Output
_____no_output_____
###Markdown
`loc` and `iloc` are very powerful functions in `pandas`. Here are 2 more examples on the `baby` table, let's see what they do:
###Code
# pandas
# Selects the 0th and 2nd column, and all rows of the table
baby_df.iloc[:, [0,2]]
###Output
_____no_output_____
###Markdown
If we want to only select a subset of columns from the table, there exists a special case short-cut where we drop the `.loc` and `:` entirely:
###Code
# pandas
# Selects the columns "Birth.Weight" and "Maternal.Age" with all rows
baby_df[["Birth.Weight", "Maternal.Age"]]
###Output
_____no_output_____
###Markdown
Getting a Value. What if you want to single out one entry of your entire table? This often occurs when we want the max or min value after sorting the table, for example: *What is the name of the flower with the most petals?* or *How heavy was the baby that went through the longest gestational days?* When we want a specific entry using the `datascience` package, we first use `tbl.column` to fetch an array out of the table, then use `.item` to retrieve the element's value. In the code below, we get the birth weight of the first baby recorded in this dataset.
###Code
# datascience
# Get the first item from the "Birth.Weight" column
baby_tbl.column("Birth.Weight").item(0)
###Output
_____no_output_____
###Markdown
In `pandas`, the syntax for getting a single element is a lot less verbose. Remember `loc` and `iloc`? Since these functions have the ability to subset rows and columns at the same time, we are going to use that functionality here. We pass in 0 as the row selector, since we only want the first entry of the "Birth.Weight" column (the entry at the 0th index).
###Code
# pandas
# Get the value at row with index 0 and column with label "Birth.Weight".
baby_df.loc[0,"Birth.Weight"]
###Output
_____no_output_____
###Markdown
Similarly with iloc, we are just passing in 0 as the first and second argument since we want the entry located at the first row and first column, which are both indexed at 0.
###Code
# pandas
# Get the value at the 0th row and 0th column.
baby_df.iloc[0,0]
# pandas
# Get the rows with indices 0 to 5 (inclusive) of the "Birth.Weight" column
baby_df.loc[0:5, "Birth.Weight"]
# pandas
# Select the first five columns of the first two rows
baby_df.iloc[0:2, 0:5]
###Output
_____no_output_____
###Markdown
Methods: Filtering and Boolean Indexing. With the `datascience` package, we can filter a table by only returning rows that satisfy a specific condition.
###Code
# datascience
# Returns all of the rows where "Birth.Weight" is greater than 120
baby_tbl.where('Birth.Weight', are.above(120))
###Output
_____no_output_____
###Markdown
Equivalently, we can do this in `pandas` by "boolean indexing". The expression below returns a boolean series where an entry is `True` if it satisfies the condition and `False` if it doesn't.
###Code
# pandas
baby_df['Birth.Weight'] > 120
###Output
_____no_output_____
###Markdown
If we want to filter our `pandas` dataframe for all rows that satisfies Birth.Weight > 120, we can pass the boolean series into the row argument of `.loc`. The idea is that we only want the rows where the "boolean index" is `True`.
###Code
# pandas
# Select all rows that are True in the boolean series baby_df['Birth.Weight'] > 120.
baby_df.loc[baby_df['Birth.Weight'] > 120, :]
###Output
_____no_output_____
###Markdown
Notably, `.loc` returns all columns by default so we can omit the column argument and get the same result.
###Code
# pandas
# Select all rows that are True in the boolean series baby_df['Birth.Weight'] > 120.
baby_df.loc[baby_df['Birth.Weight'] > 120]
###Output
_____no_output_____
###Markdown
Boolean indexing is a very popular way to conduct filtering in `pandas`. As such, there exists another special-case shorthand where we don't need the `.loc` or the `:`.
###Code
# pandas
# Select all rows that are True in the boolean series baby_df['Birth.Weight'] > 120.
baby_df[baby_df['Birth.Weight'] > 120]
###Output
_____no_output_____
###Markdown
In general, a filtering expression of the form `tbl.where(column, predicate)` in the `datascience` library takes the form `df.loc[criterion]` in `pandas`. Here are a few more examples:
###Code
# datascience
# Return all rows where Maternal.Height is greater than or equal to 63.
baby_tbl.where('Maternal.Height', are.above_or_equal_to(63))
# pandas
# Return all rows where Maternal.Height is greater than or equal to 63.
baby_df[baby_df['Maternal.Height'] >= 63]
# datascience
# Return all rows where Maternal.Smoker is True.
baby_tbl.where('Maternal.Smoker', are.equal_to(True))
# pandas
# Return all rows where Maternal.Smoker is True.
baby_df.loc[baby_df['Maternal.Smoker'] == True]
###Output
_____no_output_____
###Markdown
Filtering on Multiple Conditions. We can also filter on multiple conditions. If we want records (rows) where all of the conditions are true, we separate our criteria with the `&` symbol, where `&` represents *and*: `df.loc[(boolean series 1) & (boolean series 2) & (boolean series 3)]`. If we want at least one of the conditions to be true, we separate our criteria with `|` symbols, where `|` represents *or*: `df.loc[(boolean series 1) | (boolean series 2) | (boolean series 3)]`. An example of the `|` form is sketched at the end of the next code cell.
###Code
# datascience
# Return all rows where Gestational.Days is between 270 and 280.
baby_tbl.where('Gestational.Days', are.between(270, 280))
# pandas
# Select all rows where Gestational.Days are above or equal to 270, but less than 280.
baby_df.loc[(baby_df['Gestational.Days'] >= 270) & (baby_df['Gestational.Days'] < 280)]
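# Illustrative sketch (not in the original notebook) of the `|` (or) syntax described
# above: select rows where the mother either smoked or was younger than 20.
# Column names are taken from the baby_df table used throughout this module.
baby_df.loc[(baby_df['Maternal.Smoker'] == True) | (baby_df['Maternal.Age'] < 20)]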
###Output
_____no_output_____ |
ml/recommendation-systems/recommendation-systems.ipynb | ###Markdown
Copyright 2018 Google LLC.
###Code
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Recommendation Systems with TensorFlow. This Colab notebook complements the course on [Recommendation Systems](https://developers.google.com/machine-learning/recommendation/). Specifically, we'll be using matrix factorization to learn user and movie embeddings. Introduction: We will create a movie recommendation system based on the [MovieLens](https://movielens.org/) dataset available [here](http://grouplens.org/datasets/movielens/). The data consists of movie ratings (on a scale of 1 to 5). Outline: 1. Exploring the MovieLens Data (10 minutes) 2. Preliminaries (25 minutes) 3. Training a matrix factorization model (15 minutes) 4. Inspecting the Embeddings (15 minutes) 5. Regularization in matrix factorization (15 minutes) 6. Softmax model training (30 minutes) Setup: Let's get started by importing the required packages.
###Code
# @title Imports (run this cell)
from __future__ import print_function
import numpy as np
import pandas as pd
import collections
from mpl_toolkits.mplot3d import Axes3D
from IPython import display
from matplotlib import pyplot as plt
import sklearn
import sklearn.manifold
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.logging.set_verbosity(tf.logging.ERROR)
# Add some convenience functions to Pandas DataFrame.
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.3f}'.format
def mask(df, key, function):
"""Returns a filtered dataframe, by applying function to key"""
return df[function(df[key])]
def flatten_cols(df):
df.columns = [' '.join(col).strip() for col in df.columns.values]
return df
pd.DataFrame.mask = mask
pd.DataFrame.flatten_cols = flatten_cols
# Install Altair and activate its colab renderer.
print("Installing Altair...")
!pip install git+git://github.com/altair-viz/altair.git
import altair as alt
alt.data_transformers.enable('default', max_rows=None)
alt.renderers.enable('colab')
print("Done installing Altair.")
# Install spreadsheets and import authentication module.
USER_RATINGS = False
!pip install --upgrade -q gspread
from google.colab import auth
import gspread
from oauth2client.client import GoogleCredentials
###Output
_____no_output_____
###Markdown
We then download the MovieLens Data, and create DataFrames containing movies, users, and ratings.
###Code
# @title Load the MovieLens data (run this cell).
# Download MovieLens data.
print("Downloading movielens data...")
from urllib.request import urlretrieve
import zipfile
urlretrieve("http://files.grouplens.org/datasets/movielens/ml-100k.zip", "movielens.zip")
zip_ref = zipfile.ZipFile('movielens.zip', "r")
zip_ref.extractall()
print("Done. Dataset contains:")
print(zip_ref.read('ml-100k/u.info'))
# Load each data set (users, movies, and ratings).
users_cols = ['user_id', 'age', 'sex', 'occupation', 'zip_code']
users = pd.read_csv(
'ml-100k/u.user', sep='|', names=users_cols, encoding='latin-1')
ratings_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp']
ratings = pd.read_csv(
'ml-100k/u.data', sep='\t', names=ratings_cols, encoding='latin-1')
# The movies file contains a binary feature for each genre.
genre_cols = [
"genre_unknown", "Action", "Adventure", "Animation", "Children", "Comedy",
"Crime", "Documentary", "Drama", "Fantasy", "Film-Noir", "Horror",
"Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western"
]
movies_cols = [
'movie_id', 'title', 'release_date', "video_release_date", "imdb_url"
] + genre_cols
movies = pd.read_csv(
'ml-100k/u.item', sep='|', names=movies_cols, encoding='latin-1')
# Since the ids start at 1, we shift them to start at 0.
users["user_id"] = users["user_id"].apply(lambda x: str(x-1))
movies["movie_id"] = movies["movie_id"].apply(lambda x: str(x-1))
movies["year"] = movies['release_date'].apply(lambda x: str(x).split('-')[-1])
ratings["movie_id"] = ratings["movie_id"].apply(lambda x: str(x-1))
ratings["user_id"] = ratings["user_id"].apply(lambda x: str(x-1))
ratings["rating"] = ratings["rating"].apply(lambda x: float(x))
# Compute the number of movies to which a genre is assigned.
genre_occurences = movies[genre_cols].sum().to_dict()
# Since some movies can belong to more than one genre, we create different
# 'genre' columns as follows:
# - all_genres: all the active genres of the movie.
# - genre: randomly sampled from the active genres.
def mark_genres(movies, genres):
def get_random_genre(gs):
active = [genre for genre, g in zip(genres, gs) if g==1]
if len(active) == 0:
return 'Other'
return np.random.choice(active)
def get_all_genres(gs):
active = [genre for genre, g in zip(genres, gs) if g==1]
if len(active) == 0:
return 'Other'
return '-'.join(active)
movies['genre'] = [
get_random_genre(gs) for gs in zip(*[movies[genre] for genre in genres])]
movies['all_genres'] = [
get_all_genres(gs) for gs in zip(*[movies[genre] for genre in genres])]
mark_genres(movies, genre_cols)
# Create one merged DataFrame containing all the movielens data.
movielens = ratings.merge(movies, on='movie_id').merge(users, on='user_id')
# Utility to split the data into training and test sets.
def split_dataframe(df, holdout_fraction=0.1):
"""Splits a DataFrame into training and test sets.
Args:
df: a dataframe.
holdout_fraction: fraction of dataframe rows to use in the test set.
Returns:
train: dataframe for training
test: dataframe for testing
"""
test = df.sample(frac=holdout_fraction, replace=False)
train = df[~df.index.isin(test.index)]
return train, test
###Output
_____no_output_____
###Markdown
I. Exploring the MovieLens Data. Before we dive into model building, let's inspect our MovieLens dataset. It is usually helpful to understand the statistics of the dataset. Users: We start by printing some basic statistics describing the numeric user features.
###Code
users.describe()
###Output
_____no_output_____
###Markdown
We can also print some basic statistics describing the categorical user features
###Code
users.describe(include=[np.object])
###Output
_____no_output_____
###Markdown
We can also create histograms to further understand the distribution of the users. We use Altair to create an interactive chart.
###Code
# @title Altair visualization code (run this cell)
# The following functions are used to generate interactive Altair charts.
# We will display histograms of the data, sliced by a given attribute.
# Create filters to be used to slice the data.
occupation_filter = alt.selection_multi(fields=["occupation"])
occupation_chart = alt.Chart().mark_bar().encode(
x="count()",
y=alt.Y("occupation:N"),
color=alt.condition(
occupation_filter,
alt.Color("occupation:N", scale=alt.Scale(scheme='category20')),
alt.value("lightgray")),
).properties(width=300, height=300, selection=occupation_filter)
# A function that generates a histogram of filtered data.
def filtered_hist(field, label, filter):
"""Creates a layered chart of histograms.
The first layer (light gray) contains the histogram of the full data, and the
second contains the histogram of the filtered data.
Args:
field: the field for which to generate the histogram.
label: String label of the histogram.
filter: an alt.Selection object to be used to filter the data.
"""
base = alt.Chart().mark_bar().encode(
x=alt.X(field, bin=alt.Bin(maxbins=10), title=label),
y="count()",
).properties(
width=300,
)
return alt.layer(
base.transform_filter(filter),
base.encode(color=alt.value('lightgray'), opacity=alt.value(.7)),
).resolve_scale(y='independent')
###Output
_____no_output_____
###Markdown
Next, we look at the distribution of ratings per user. Clicking on an occupation in the right chart will filter the data by that occupation. The corresponding histogram is shown in blue, and superimposed with the histogram for the whole data (in light gray). You can use SHIFT+click to select multiple subsets. What do you observe, and how might this affect the recommendations?
###Code
users_ratings = (
ratings
.groupby('user_id', as_index=False)
.agg({'rating': ['count', 'mean']})
.flatten_cols()
.merge(users, on='user_id')
)
# Create a chart for the count, and one for the mean.
alt.hconcat(
filtered_hist('rating count', '# ratings / user', occupation_filter),
filtered_hist('rating mean', 'mean user rating', occupation_filter),
occupation_chart,
data=users_ratings)
###Output
_____no_output_____
###Markdown
Movies: It is also useful to look at information about the movies and their ratings.
###Code
movies_ratings = movies.merge(
ratings
.groupby('movie_id', as_index=False)
.agg({'rating': ['count', 'mean']})
.flatten_cols(),
on='movie_id')
genre_filter = alt.selection_multi(fields=['genre'])
genre_chart = alt.Chart().mark_bar().encode(
x="count()",
y=alt.Y('genre'),
color=alt.condition(
genre_filter,
alt.Color("genre:N"),
alt.value('lightgray'))
).properties(height=300, selection=genre_filter)
(movies_ratings[['title', 'rating count', 'rating mean']]
.sort_values('rating count', ascending=False)
.head(10))
(movies_ratings[['title', 'rating count', 'rating mean']]
.mask('rating count', lambda x: x > 20)
.sort_values('rating mean', ascending=False)
.head(10))
###Output
_____no_output_____
###Markdown
Finally, the last chart shows the distribution of the number of ratings and average rating.
###Code
# Display the number of ratings and average rating per movie.
alt.hconcat(
filtered_hist('rating count', '# ratings / movie', genre_filter),
filtered_hist('rating mean', 'mean movie rating', genre_filter),
genre_chart,
data=movies_ratings)
###Output
_____no_output_____
###Markdown
II. Preliminaries. Our goal is to factorize the ratings matrix $A$ into the product of a user embedding matrix $U$ and a movie embedding matrix $V$, such that $A \approx UV^\top$ with $U = \begin{bmatrix} u_{1} \\ \hline \vdots \\ \hline u_{N} \end{bmatrix}$ and $V = \begin{bmatrix} v_{1} \\ \hline \vdots \\ \hline v_{M} \end{bmatrix}$.
Here
- $N$ is the number of users,
- $M$ is the number of movies,
- $A_{ij}$ is the rating of the $j$th movie by the $i$th user,
- each row $U_i$ is a $d$-dimensional vector (embedding) representing user $i$,
- each row $V_j$ is a $d$-dimensional vector (embedding) representing movie $j$,
- the prediction of the model for the $(i, j)$ pair is the dot product $\langle U_i, V_j \rangle$.

Sparse Representation of the Rating Matrix. The rating matrix could be very large and, in general, most of the entries are unobserved, since a given user will only rate a small subset of movies. For efficient representation, we will use a [tf.SparseTensor](https://www.tensorflow.org/api_docs/python/tf/SparseTensor). A `SparseTensor` uses three tensors to represent the matrix: `tf.SparseTensor(indices, values, dense_shape)` represents a tensor, where a value $A_{ij} = a$ is encoded by setting `indices[k] = [i, j]` and `values[k] = a`. The last tensor, `dense_shape`, is used to specify the shape of the full underlying matrix.

Toy example. Assume we have $2$ users and $4$ movies. Our toy ratings dataframe has three ratings:

user\_id | movie\_id | rating
--:|--:|--:
0 | 0 | 5.0
0 | 1 | 3.0
1 | 3 | 1.0

The corresponding rating matrix is
$$A =\begin{bmatrix}5.0 & 3.0 & 0 & 0 \\0 & 0 & 0 & 1.0\end{bmatrix}$$
And the SparseTensor representation is,
```python
SparseTensor(
  indices=[[0, 0], [0, 1], [1, 3]],
  values=[5.0, 3.0, 1.0],
  dense_shape=[2, 4])
```

Exercise 1: Build a tf.SparseTensor representation of the Rating Matrix. In this exercise, we'll write a function that maps from our `ratings` DataFrame to a `tf.SparseTensor`. Hint: you can select the values of a given column of a DataFrame `df` using `df['column_name'].values`.
###Code
def build_rating_sparse_tensor(ratings_df):
"""
Args:
ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns.
Returns:
A tf.SparseTensor representing the ratings matrix.
"""
# ========================= Complete this section ============================
# indices =
# values =
# ============================================================================
return tf.SparseTensor(
indices=indices,
values=values,
dense_shape=[users.shape[0], movies.shape[0]])
#@title Solution
def build_rating_sparse_tensor(ratings_df):
"""
Args:
ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns.
Returns:
a tf.SparseTensor representing the ratings matrix.
"""
indices = ratings_df[['user_id', 'movie_id']].values
values = ratings_df['rating'].values
return tf.SparseTensor(
indices=indices,
values=values,
dense_shape=[users.shape[0], movies.shape[0]])
###Output
_____no_output_____
###Markdown
Calculating the errorThe model approximates the ratings matrix $A$ by a low-rank product $UV^\top$. We need a way to measure the approximation error. We'll start by using the Mean Squared Error of observed entries only (we will revisit this later). It is defined as$$\begin{align*}\text{MSE}(A, UV^\top)&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - (UV^\top)_{ij})^2} \\&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - \langle U_i, V_j\rangle)^2}\end{align*}$$where $\Omega$ is the set of observed ratings, and $|\Omega|$ is the cardinality of $\Omega$. Exercise 2: Mean Squared ErrorWrite a TensorFlow function that takes a sparse rating matrix $A$ and the two embedding matrices $U, V$ and returns the mean squared error $\text{MSE}(A, UV^\top)$.Hints: * in this section, we only consider observed entries when calculating the loss. * a `SparseTensor` `sp_x` is a tuple of three Tensors: `sp_x.indices`, `sp_x.values` and `sp_x.dense_shape`. * you may find [`tf.gather_nd`](https://www.tensorflow.org/api_docs/python/tf/gather_nd) and [`tf.losses.mean_squared_error`](https://www.tensorflow.org/api_docs/python/tf/losses/mean_squared_error) helpful.
###Code
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
# ========================= Complete this section ============================
# loss =
# ============================================================================
return loss
#@title Solution
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
predictions = tf.gather_nd(
tf.matmul(user_embeddings, movie_embeddings, transpose_b=True),
sparse_ratings.indices)
loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions)
return loss
###Output
_____no_output_____
###Markdown
Note: One approach is to compute the full prediction matrix $UV^\top$, then gather the entries corresponding to the observed pairs. The memory cost of this approach is $O(NM)$. For the MovieLens dataset, this is fine, as the dense $N \times M$ matrix is small enough to fit in memory ($N = 943$, $M = 1682$).Another approach (given in the alternate solution below) is to only gather the embeddings of the observed pairs, then compute their dot products. The memory cost is $O(|\Omega| d)$ where $d$ is the embedding dimension. In our case, $|\Omega| = 10^5$, and the embedding dimension is on the order of $10$, so the memory cost of both methods is comparable. But when the number of users or movies is much larger, the first approach becomes infeasible.
###Code
#@title Alternate Solution
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
predictions = tf.reduce_sum(
tf.gather(user_embeddings, sparse_ratings.indices[:, 0]) *
tf.gather(movie_embeddings, sparse_ratings.indices[:, 1]),
axis=1)
loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions)
return loss
###Output
_____no_output_____
###Markdown
Exercise 3 (Optional): adding your own ratings to the data set You have the option to add your own ratings to the data set. If you choose to do so, you will be able to see recommendations for yourself.Start by checking the box below. Running the next cell will authenticate you to your google Drive account, and create a spreadsheet, that contains all movie titles in column 'A'. Follow the link to the spreadsheet and take 3 minutes to rate some of the movies. Your ratings should be entered in column 'B'.
###Code
USER_RATINGS = True #@param {type:"boolean"}
# @title Run to create a spreadsheet, then use it to enter your ratings.
# Authenticate user.
if USER_RATINGS:
auth.authenticate_user()
gc = gspread.authorize(GoogleCredentials.get_application_default())
# Create the spreadsheet and print a link to it.
try:
sh = gc.open('MovieLens-test')
except(gspread.SpreadsheetNotFound):
sh = gc.create('MovieLens-test')
worksheet = sh.sheet1
titles = movies['title'].values
cell_list = worksheet.range(1, 1, len(titles), 1)
for cell, title in zip(cell_list, titles):
cell.value = title
worksheet.update_cells(cell_list)
print("Link to the spreadsheet: "
"https://docs.google.com/spreadsheets/d/{}/edit".format(sh.id))
###Output
_____no_output_____
###Markdown
Run the next cell to load your ratings and add them to the main `ratings` DataFrame.
###Code
# @title Run to load your ratings.
# Load the ratings from the spreadsheet and create a DataFrame.
if USER_RATINGS:
my_ratings = pd.DataFrame.from_records(worksheet.get_all_values()).reset_index()
my_ratings = my_ratings[my_ratings[1] != '']
my_ratings = pd.DataFrame({
'user_id': "943",
'movie_id': list(map(str, my_ratings['index'])),
'rating': list(map(float, my_ratings[1])),
})
# Remove previous ratings.
ratings = ratings[ratings.user_id != "943"]
# Add new ratings.
ratings = ratings.append(my_ratings, ignore_index=True)
# Add new user to the users DataFrame.
if users.shape[0] == 943:
users = users.append(users.iloc[942], ignore_index=True)
users["user_id"][943] = "943"
print("Added your %d ratings; you have great taste!" % len(my_ratings))
ratings[ratings.user_id=="943"].merge(movies[['movie_id', 'title']])
###Output
_____no_output_____
###Markdown
III. Training a Matrix Factorization model. CFModel (Collaborative Filtering Model) helper class: This is a simple class to train a matrix factorization model using stochastic gradient descent. The class constructor takes
- the user embeddings U (a `tf.Variable`),
- the movie embeddings V (a `tf.Variable`),
- a loss to optimize (a `tf.Tensor`),
- an optional list of metrics dictionaries, each mapping a string (the name of the metric) to a tensor. These are evaluated and plotted during training (e.g. training error and test error).

After training, one can access the trained embeddings using the `model.embeddings` dictionary. Example usage:
```
U_var = ...
V_var = ...
loss = ...
model = CFModel(U_var, V_var, loss)
model.train(num_iterations=100, learning_rate=1.0)
user_embeddings = model.embeddings['user_id']
movie_embeddings = model.embeddings['movie_id']
```
###Code
# @title CFModel helper class (run this cell)
class CFModel(object):
"""Simple class that represents a collaborative filtering model"""
def __init__(self, embedding_vars, loss, metrics=None):
"""Initializes a CFModel.
Args:
embedding_vars: A dictionary of tf.Variables.
loss: A float Tensor. The loss to optimize.
metrics: optional list of dictionaries of Tensors. The metrics in each
dictionary will be plotted in a separate figure during training.
"""
self._embedding_vars = embedding_vars
self._loss = loss
self._metrics = metrics
self._embeddings = {k: None for k in embedding_vars}
self._session = None
@property
def embeddings(self):
"""The embeddings dictionary."""
return self._embeddings
def train(self, num_iterations=100, learning_rate=1.0, plot_results=True,
optimizer=tf.train.GradientDescentOptimizer):
"""Trains the model.
Args:
iterations: number of iterations to run.
learning_rate: optimizer learning rate.
plot_results: whether to plot the results at the end of training.
optimizer: the optimizer to use. Default to GradientDescentOptimizer.
Returns:
The metrics dictionary evaluated at the last iteration.
"""
with self._loss.graph.as_default():
opt = optimizer(learning_rate)
train_op = opt.minimize(self._loss)
local_init_op = tf.group(
tf.variables_initializer(opt.variables()),
tf.local_variables_initializer())
if self._session is None:
self._session = tf.Session()
with self._session.as_default():
self._session.run(tf.global_variables_initializer())
self._session.run(tf.tables_initializer())
tf.train.start_queue_runners()
with self._session.as_default():
local_init_op.run()
iterations = []
metrics = self._metrics or ({},)
metrics_vals = [collections.defaultdict(list) for _ in self._metrics]
# Train and append results.
for i in range(num_iterations + 1):
_, results = self._session.run((train_op, metrics))
if (i % 10 == 0) or i == num_iterations:
print("\r iteration %d: " % i + ", ".join(
["%s=%f" % (k, v) for r in results for k, v in r.items()]),
end='')
iterations.append(i)
for metric_val, result in zip(metrics_vals, results):
for k, v in result.items():
metric_val[k].append(v)
for k, v in self._embedding_vars.items():
self._embeddings[k] = v.eval()
if plot_results:
# Plot the metrics.
num_subplots = len(metrics)+1
fig = plt.figure()
fig.set_size_inches(num_subplots*10, 8)
for i, metric_vals in enumerate(metrics_vals):
ax = fig.add_subplot(1, num_subplots, i+1)
for k, v in metric_vals.items():
ax.plot(iterations, v, label=k)
ax.set_xlim([1, num_iterations])
ax.legend()
return results
###Output
_____no_output_____
###Markdown
Exercise 4: Build a Matrix Factorization model and train it. Using your `sparse_mean_square_error` function, write a function that builds a `CFModel` by creating the embedding variables and the train and test losses.
###Code
def build_model(ratings, embedding_dim=3, init_stddev=1.):
"""
Args:
ratings: a DataFrame of the ratings
embedding_dim: the dimension of the embedding vectors.
init_stddev: float, the standard deviation of the random initial embeddings.
Returns:
model: a CFModel.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
# ========================= Complete this section ============================
# A_train =
# A_test =
# ============================================================================
# Initialize the embeddings using a normal distribution.
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
# ========================= Complete this section ============================
# train_loss =
# test_loss =
# ============================================================================
metrics = {
'train_error': train_loss,
'test_error': test_loss
}
embeddings = {
"user_id": U,
"movie_id": V
}
return CFModel(embeddings, train_loss, [metrics])
#@title Solution
def build_model(ratings, embedding_dim=3, init_stddev=1.):
"""
Args:
ratings: a DataFrame of the ratings
embedding_dim: the dimension of the embedding vectors.
init_stddev: float, the standard deviation of the random initial embeddings.
Returns:
model: a CFModel.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
A_train = build_rating_sparse_tensor(train_ratings)
A_test = build_rating_sparse_tensor(test_ratings)
# Initialize the embeddings using a normal distribution.
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
train_loss = sparse_mean_square_error(A_train, U, V)
test_loss = sparse_mean_square_error(A_test, U, V)
metrics = {
'train_error': train_loss,
'test_error': test_loss
}
embeddings = {
"user_id": U,
"movie_id": V
}
return CFModel(embeddings, train_loss, [metrics])
###Output
_____no_output_____
###Markdown
Great, now it's time to train the model! Go ahead and run the next cell, trying different parameters (embedding dimension, learning rate, iterations). The training and test errors are plotted at the end of training. You can inspect these values to validate the hyper-parameters. Note: by calling `model.train` again, the model will continue training starting from the current values of the embeddings.
###Code
# Build the CF model and train it.
model = build_model(ratings, embedding_dim=30, init_stddev=0.5)
model.train(num_iterations=1000, learning_rate=10.)
###Output
_____no_output_____
###Markdown
The movie and user embeddings are also displayed in the right figure. When the embedding dimension is greater than 3, the embeddings are projected on the first 3 dimensions. The next section will take a more detailed look at the embeddings.

IV. Inspecting the Embeddings. In this section, we take a closer look at the learned embeddings, by
- computing your recommendations,
- looking at the nearest neighbors of some movies,
- looking at the norms of the movie embeddings,
- visualizing the embeddings in a projected embedding space.

Exercise 5: Write a function that computes the scores of the candidates. We start by writing a function that, given a query embedding $u \in \mathbb R^d$ and item embeddings $V \in \mathbb R^{N \times d}$, computes the item scores. As discussed in the lecture, there are different similarity measures we can use, and these can yield different results. We will compare the following:
- dot product: the score of item j is $\langle u, V_j \rangle$.
- cosine: the score of item j is $\frac{\langle u, V_j \rangle}{\|u\|\|V_j\|}$.

Hints:
- you can use [`np.dot`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html) to compute the product of two np.Arrays.
- you can use [`np.linalg.norm`](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.norm.html) to compute the norm of a np.Array.
###Code
DOT = 'dot'
COSINE = 'cosine'
def compute_scores(query_embedding, item_embeddings, measure=DOT):
"""Computes the scores of the candidates given a query.
Args:
query_embedding: a vector of shape [k], representing the query embedding.
item_embeddings: a matrix of shape [N, k], such that row i is the embedding
of item i.
measure: a string specifying the similarity measure to be used. Can be
either DOT or COSINE.
Returns:
scores: a vector of shape [N], such that scores[i] is the score of item i.
"""
# ========================= Complete this section ============================
# scores =
# ============================================================================
return scores
#@title Solution
DOT = 'dot'
COSINE = 'cosine'
def compute_scores(query_embedding, item_embeddings, measure=DOT):
"""Computes the scores of the candidates given a query.
Args:
query_embedding: a vector of shape [k], representing the query embedding.
item_embeddings: a matrix of shape [N, k], such that row i is the embedding
of item i.
measure: a string specifying the similarity measure to be used. Can be
either DOT or COSINE.
Returns:
scores: a vector of shape [N], such that scores[i] is the score of item i.
"""
u = query_embedding
V = item_embeddings
if measure == COSINE:
V = V / np.linalg.norm(V, axis=1, keepdims=True)
u = u / np.linalg.norm(u)
scores = u.dot(V.T)
return scores
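# Quick illustrative sanity check (not part of the original exercise): with these
# toy embeddings, the dot-product scores of the query against each item should be
# [1., 0.], and the cosine scores are the same here since all vectors have unit norm.
_toy_query = np.array([1.0, 0.0])
_toy_items = np.array([[1.0, 0.0], [0.0, 1.0]])
print(compute_scores(_toy_query, _toy_items, DOT))
print(compute_scores(_toy_query, _toy_items, COSINE))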
###Output
_____no_output_____
###Markdown
Equipped with this function, we can compute recommendations, where the query embedding can be either a user embedding or a movie embedding.
###Code
# @title User recommendations and nearest neighbors (run this cell)
def user_recommendations(model, measure=DOT, exclude_rated=False, k=6):
if USER_RATINGS:
scores = compute_scores(
model.embeddings["user_id"][943], model.embeddings["movie_id"], measure)
score_key = measure + ' score'
df = pd.DataFrame({
score_key: list(scores),
'movie_id': movies['movie_id'],
'titles': movies['title'],
'genres': movies['all_genres'],
})
if exclude_rated:
# remove movies that are already rated
rated_movies = ratings[ratings.user_id == "943"]["movie_id"].values
df = df[df.movie_id.apply(lambda movie_id: movie_id not in rated_movies)]
display.display(df.sort_values([score_key], ascending=False).head(k))
def movie_neighbors(model, title_substring, measure=DOT, k=6):
# Search for movie ids that match the given substring.
ids = movies[movies['title'].str.contains(title_substring)].index.values
titles = movies.iloc[ids]['title'].values
if len(titles) == 0:
raise ValueError("Found no movies with title %s" % title_substring)
print("Nearest neighbors of : %s." % titles[0])
if len(titles) > 1:
print("[Found more than one matching movie. Other candidates: {}]".format(
", ".join(titles[1:])))
movie_id = ids[0]
scores = compute_scores(
model.embeddings["movie_id"][movie_id], model.embeddings["movie_id"],
measure)
score_key = measure + ' score'
df = pd.DataFrame({
score_key: list(scores),
'titles': movies['title'],
'genres': movies['all_genres']
})
display.display(df.sort_values([score_key], ascending=False).head(k))
###Output
_____no_output_____
###Markdown
Your recommendations. If you chose to enter your own ratings, you can run the next cell to generate recommendations for you.
###Code
user_recommendations(model, measure=COSINE, k=5)
###Output
_____no_output_____
###Markdown
How do the recommendations look? Movie Nearest Neighbors: Let's look at the nearest neighbors for some of the movies.
###Code
movie_neighbors(model, "Aladdin", DOT)
movie_neighbors(model, "Aladdin", COSINE)
###Output
_____no_output_____
###Markdown
It seems that the quality of learned embeddings may not be very good. This will be addressed in Section V by adding several regularization techniques. First, we will further inspect the embeddings. Movie Embedding Norm: We can also observe that the recommendations with dot-product and cosine are different: with dot-product, the model tends to recommend popular movies. This can be explained by the fact that in matrix factorization models, the norm of the embedding is often correlated with popularity (popular movies have a larger norm), which makes it more likely to recommend more popular items. We can confirm this hypothesis by sorting the movies by their embedding norm, as done in the next cell.
###Code
# @title Embedding Visualization code (run this cell)
def movie_embedding_norm(models):
"""Visualizes the norm and number of ratings of the movie embeddings.
Args:
    models: A CFModel object, or a list of CFModel objects.
"""
if not isinstance(models, list):
models = [models]
df = pd.DataFrame({
'title': movies['title'],
'genre': movies['genre'],
'num_ratings': movies_ratings['rating count'],
})
charts = []
brush = alt.selection_interval()
for i, model in enumerate(models):
norm_key = 'norm'+str(i)
df[norm_key] = np.linalg.norm(model.embeddings["movie_id"], axis=1)
nearest = alt.selection(
type='single', encodings=['x', 'y'], on='mouseover', nearest=True,
empty='none')
base = alt.Chart().mark_circle().encode(
x='num_ratings',
y=norm_key,
color=alt.condition(brush, alt.value('#4c78a8'), alt.value('lightgray'))
).properties(
selection=nearest).add_selection(brush)
text = alt.Chart().mark_text(align='center', dx=5, dy=-5).encode(
x='num_ratings', y=norm_key,
text=alt.condition(nearest, 'title', alt.value('')))
charts.append(alt.layer(base, text))
return alt.hconcat(*charts, data=df)
def visualize_movie_embeddings(data, x, y):
nearest = alt.selection(
type='single', encodings=['x', 'y'], on='mouseover', nearest=True,
empty='none')
base = alt.Chart().mark_circle().encode(
x=x,
y=y,
color=alt.condition(genre_filter, "genre", alt.value("whitesmoke")),
).properties(
width=600,
height=600,
selection=nearest)
text = alt.Chart().mark_text(align='left', dx=5, dy=-5).encode(
x=x,
y=y,
text=alt.condition(nearest, 'title', alt.value('')))
return alt.hconcat(alt.layer(base, text), genre_chart, data=data)
def tsne_movie_embeddings(model):
"""Visualizes the movie embeddings, projected using t-SNE with Cosine measure.
Args:
    model: A CFModel object.
"""
tsne = sklearn.manifold.TSNE(
n_components=2, perplexity=40, metric='cosine', early_exaggeration=10.0,
init='pca', verbose=True, n_iter=400)
print('Running t-SNE...')
V_proj = tsne.fit_transform(model.embeddings["movie_id"])
movies.loc[:,'x'] = V_proj[:, 0]
movies.loc[:,'y'] = V_proj[:, 1]
return visualize_movie_embeddings(movies, 'x', 'y')
movie_embedding_norm(model)
###Output
_____no_output_____
###Markdown
Note: Depending on how the model is initialized, you may observe that some niche movies (ones with few ratings) have a high norm, leading to spurious recommendations. This can happen if the embedding of that movie happens to be initialized with a high norm. Then, because the movie has few ratings, it is infrequently updated, and can keep its high norm. This will be alleviated by using regularization.Try changing the value of the hyper-parameter `init_stddev`. One quantity that can be helpful is that the expected norm of a $d$-dimensional vector with entries $\sim \mathcal N(0, \sigma^2)$ is approximately $\sigma \sqrt d$.How does this affect the embedding norm distribution, and the ranking of the top-norm movies?
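For a rough sense of scale, here is a small numeric check (added for illustration, using the embedding dimension from this notebook):

```python
# Expected norm of a d-dimensional N(0, sigma^2) vector is roughly sigma * sqrt(d).
d = 30  # embedding_dim used above
for sigma in (0.5, 0.05):
  print("init_stddev = %.2f -> expected norm ~ %.2f" % (sigma, sigma * np.sqrt(d)))
# init_stddev = 0.50 -> expected norm ~ 2.74
# init_stddev = 0.05 -> expected norm ~ 0.27
```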
###Code
#@title Solution
model_lowinit = build_model(ratings, embedding_dim=30, init_stddev=0.05)
model_lowinit.train(num_iterations=1000, learning_rate=10.)
movie_neighbors(model_lowinit, "Aladdin", DOT)
movie_neighbors(model_lowinit, "Aladdin", COSINE)
movie_embedding_norm([model, model_lowinit])
###Output
_____no_output_____
###Markdown
Embedding visualizationSince it is hard to visualize embeddings in a higher-dimensional space (when the embedding dimension $k > 3$), one approach is to project the embeddings to a lower-dimensional space. t-SNE (t-distributed Stochastic Neighbor Embedding) is an algorithm that projects the embeddings while attempting to preserve their pairwise distances. It can be useful for visualization, but one should use it with care. For more information on using t-SNE, see [How to Use t-SNE Effectively](https://distill.pub/2016/misread-tsne/).
###Code
tsne_movie_embeddings(model_lowinit)
###Output
_____no_output_____
###Markdown
You can highlight the embeddings of a given genre by clicking on the genres panel (SHIFT+click to select multiple genres).We can observe that the embeddings do not seem to have any notable structure, and the embeddings of a given genre are located all over the embedding space. This confirms the poor quality of the learned embeddings. One of the main reasons, which we will address in the next section, is that we only trained the model on observed pairs, and without regularization. V. Regularization In Matrix FactorizationIn the previous section, our loss was defined as the mean squared error on the observed part of the rating matrix. As discussed in the lecture, this can be problematic as the model does not learn how to place the embeddings of irrelevant movies. This phenomenon is known as *folding*.We will add regularization terms that will address this issue. We will use two types of regularization:- Regularization of the model parameters. This is a common $\ell_2$ regularization term on the embedding matrices, given by $r(U, V) = \frac{1}{N} \sum_i \|U_i\|^2 + \frac{1}{M}\sum_j \|V_j\|^2$.- A global prior that pushes the prediction of any pair towards zero, called the *gravity* term. This is given by $g(U, V) = \frac{1}{MN} \sum_{i = 1}^N \sum_{j = 1}^M \langle U_i, V_j \rangle^2$.The total loss is then given by$$\frac{1}{|\Omega|}\sum_{(i, j) \in \Omega} (A_{ij} - \langle U_i, V_j\rangle)^2 + \lambda _r r(U, V) + \lambda_g g(U, V)$$where $\lambda_r$ and $\lambda_g$ are two regularization coefficients (hyper-parameters). Exercise 6: Build a regularized Matrix Factorization model and train itWrite a function that builds a regularized model. You are given a function `gravity(U, V)` that computes the gravity term given the two embedding matrices $U$ and $V$.
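As a side note (an illustrative numpy check, not part of the original exercise): the `gravity(U, V)` helper given below does not loop over all $(i, j)$ pairs; it relies on the identity $\sum_{ij} \langle U_i, V_j\rangle^2 = \sum_{kl} (U^\top U)_{kl} (V^\top V)_{kl}$, which the following toy snippet verifies:

```python
# Toy check of the Gram-matrix identity used by gravity(U, V) (sizes are arbitrary).
rng = np.random.RandomState(0)
U_demo = rng.randn(5, 3)   # 5 "users", embedding dimension 3
V_demo = rng.randn(7, 3)   # 7 "movies"
direct = np.mean(U_demo.dot(V_demo.T) ** 2)                          # (1/MN) * sum_ij <U_i, V_j>^2
gram = np.sum(U_demo.T.dot(U_demo) * V_demo.T.dot(V_demo)) / (5 * 7)  # Gram-matrix form
print(np.allclose(direct, gram))  # True
```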
###Code
def gravity(U, V):
"""Creates a gravity loss given two embedding matrices."""
return 1. / (U.shape[0].value*V.shape[0].value) * tf.reduce_sum(
tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True))
def build_regularized_model(
ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1.,
init_stddev=0.1):
"""
Args:
ratings: the DataFrame of movie ratings.
embedding_dim: The dimension of the embedding space.
regularization_coeff: The regularization coefficient lambda.
gravity_coeff: The gravity regularization coefficient lambda_g.
Returns:
A CFModel object that uses a regularized loss.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
A_train = build_rating_sparse_tensor(train_ratings)
A_test = build_rating_sparse_tensor(test_ratings)
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
# ========================= Complete this section ============================
# error_train =
# error_test =
# gravity_loss =
# regularization_loss =
# ============================================================================
total_loss = error_train + regularization_loss + gravity_loss
losses = {
'train_error': error_train,
'test_error': error_test,
}
loss_components = {
'observed_loss': error_train,
'regularization_loss': regularization_loss,
'gravity_loss': gravity_loss,
}
embeddings = {"user_id": U, "movie_id": V}
return CFModel(embeddings, total_loss, [losses, loss_components])
# @title Solution
def gravity(U, V):
"""Creates a gravity loss given two embedding matrices."""
return 1. / (U.shape[0].value*V.shape[0].value) * tf.reduce_sum(
tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True))
def build_regularized_model(
ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1.,
init_stddev=0.1):
"""
Args:
ratings: the DataFrame of movie ratings.
embedding_dim: The dimension of the embedding space.
regularization_coeff: The regularization coefficient lambda.
gravity_coeff: The gravity regularization coefficient lambda_g.
Returns:
A CFModel object that uses a regularized loss.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
A_train = build_rating_sparse_tensor(train_ratings)
A_test = build_rating_sparse_tensor(test_ratings)
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
error_train = sparse_mean_square_error(A_train, U, V)
error_test = sparse_mean_square_error(A_test, U, V)
gravity_loss = gravity_coeff * gravity(U, V)
regularization_loss = regularization_coeff * (
tf.reduce_sum(U*U)/U.shape[0].value + tf.reduce_sum(V*V)/V.shape[0].value)
total_loss = error_train + regularization_loss + gravity_loss
losses = {
'train_error_observed': error_train,
'test_error_observed': error_test,
}
loss_components = {
'observed_loss': error_train,
'regularization_loss': regularization_loss,
'gravity_loss': gravity_loss,
}
embeddings = {"user_id": U, "movie_id": V}
return CFModel(embeddings, total_loss, [losses, loss_components])
###Output
_____no_output_____
###Markdown
It is now time to train the regularized model! You can try different values of the regularization coefficients, and different embedding dimensions.
###Code
reg_model = build_regularized_model(
ratings, regularization_coeff=0.1, gravity_coeff=1.0, embedding_dim=35,
init_stddev=.05)
reg_model.train(num_iterations=2000, learning_rate=20.)
###Output
_____no_output_____
###Markdown
Observe that adding the regularization terms results in a higher MSE, both on the training and test set. However, as we will see, the quality of the recommendations improves. This highlights a tension between fitting the observed data and minimizing the regularization terms. Fitting the observed data often emphasizes learning high similarity (between items with many interactions), but a good embedding representation also requires learning low similarity (between items with few or no interactions). Inspect the resultsLet's see if the results with regularization look better.
###Code
user_recommendations(reg_model, DOT, exclude_rated=True, k=10)
###Output
_____no_output_____
###Markdown
Hopefully, these recommendations look better. You can change the similarity measure from COSINE to DOT and observe how this affects the recommendations.Since the model is likely to recommend items that you rated highly, you have the option to exclude the items you rated, using `exclude_rated=True`.In the following cells, we display the nearest neighbors, the embedding norms, and the t-SNE projection of the movie embeddings.
###Code
movie_neighbors(reg_model, "Aladdin", DOT)
movie_neighbors(reg_model, "Aladdin", COSINE)
###Output
_____no_output_____
###Markdown
Here we compare the embedding norms for `model`, `model_lowinit`, and `reg_model`. Selecting a subset of the embeddings will highlight them on all three charts simultaneously.
###Code
movie_embedding_norm([model, model_lowinit, reg_model])
# Visualize the embeddings
tsne_movie_embeddings(reg_model)
###Output
_____no_output_____
###Markdown
We should observe that the embeddings have a lot more structure than the unregularized case. Try selecting different genres and observe how they tend to form clusters (for example Horror, Animation and Children). ConclusionThis concludes this section on matrix factorization models. Note that while the scale of the problem is small enough to allow efficient training using SGD, many practical problems need to be trained using more specialized algorithms such as Alternating Least Squares (see [tf.contrib.factorization.WALSMatrixFactorization](https://www.tensorflow.org/api_docs/python/tf/contrib/factorization/WALSMatrixFactorization) for a TF implementation). VI. Softmax modelIn this section, we will train a simple softmax model that predicts whether a given user has rated a movie.**Note**: if you are taking the self-study version of the class, make sure to read through the part of the class covering Softmax training before working on this part.The model will take as input a feature vector $x$ representing the list of movies the user has rated. We start from the ratings DataFrame, which we group by user_id.
###Code
rated_movies = (ratings[["user_id", "movie_id"]]
.groupby("user_id", as_index=False)
.aggregate(lambda x: list(x)))
rated_movies.head()
###Output
_____no_output_____
###Markdown
We then create a function that generates an example batch, such that each example contains the following features:- movie_id: A tensor of strings of the movie ids that the user rated.- genre: A tensor of strings of the genres of those movies- year: A tensor of strings of the release year.
###Code
#@title Batch generation code (run this cell)
years_dict = {
movie: year for movie, year in zip(movies["movie_id"], movies["year"])
}
genres_dict = {
movie: genres.split('-')
for movie, genres in zip(movies["movie_id"], movies["all_genres"])
}
def make_batch(ratings, batch_size):
"""Creates a batch of examples.
Args:
ratings: A DataFrame of ratings such that examples["movie_id"] is a list of
movies rated by a user.
batch_size: The batch size.
"""
def pad(x, fill):
return pd.DataFrame.from_dict(x).fillna(fill).values
movie = []
year = []
genre = []
label = []
for movie_ids in ratings["movie_id"].values:
movie.append(movie_ids)
genre.append([x for movie_id in movie_ids for x in genres_dict[movie_id]])
year.append([years_dict[movie_id] for movie_id in movie_ids])
label.append([int(movie_id) for movie_id in movie_ids])
features = {
"movie_id": pad(movie, ""),
"year": pad(year, ""),
"genre": pad(genre, ""),
"label": pad(label, -1)
}
batch = (
tf.data.Dataset.from_tensor_slices(features)
.shuffle(1000)
.repeat()
.batch(batch_size)
.make_one_shot_iterator()
.get_next())
return batch
def select_random(x):
"""Selectes a random elements from each row of x."""
def to_float(x):
return tf.cast(x, tf.float32)
def to_int(x):
return tf.cast(x, tf.int64)
batch_size = tf.shape(x)[0]
rn = tf.range(batch_size)
nnz = to_float(tf.count_nonzero(x >= 0, axis=1))
rnd = tf.random_uniform([batch_size])
ids = tf.stack([to_int(rn), to_int(nnz * rnd)], axis=1)
return to_int(tf.gather_nd(x, ids))
###Output
_____no_output_____
###Markdown
Loss functionRecall that the softmax model maps the input features $x$ to a user embedding $\psi(x) \in \mathbb R^d$, where $d$ is the embedding dimension. This vector is then multiplied by a movie embedding matrix $V \in \mathbb R^{m \times d}$ (where $m$ is the number of movies), and the final output of the model is the softmax of the product$$\hat p(x) = \text{softmax}(\psi(x) V^\top).$$Given a target label $y$, if we denote by $p = 1_y$ a one-hot encoding of this target label, then the loss is the cross-entropy between $\hat p(x)$ and $p$. Exercise 7: Write a loss function for the softmax model.In this exercise, we will write a function that takes tensors representing the user embeddings $\psi(x)$, movie embeddings $V$, target label $y$, and return the cross-entropy loss.Hint: You can use the function [`tf.nn.sparse_softmax_cross_entropy_with_logits`](https://www.tensorflow.org/api_docs/python/tf/nn/sparse_softmax_cross_entropy_with_logits), which takes `logits` as input, where `logits` refers to the product $\psi(x) V^\top$.
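As a toy illustration of this loss for a single example (plain numpy, added here for clarity; the TensorFlow version is the subject of the exercise below):

```python
# Cross-entropy between softmax(psi(x) V^T) and a one-hot target, for one example.
psi_x = np.array([0.5, -1.0, 2.0])        # user embedding psi(x), d = 3 (toy values)
V = np.array([[1.0, 0.0, 0.5],
              [0.0, 1.0, -0.5],
              [0.3, 0.3, 0.3]])           # m = 3 movies, d = 3
logits = psi_x.dot(V.T)                   # shape [m]
p_hat = np.exp(logits) / np.sum(np.exp(logits))
y = 2                                     # target movie id
print(-np.log(p_hat[y]))                  # cross-entropy loss for this example
```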
###Code
def softmax_loss(user_embeddings, movie_embeddings, labels):
"""Returns the cross-entropy loss of the softmax model.
Args:
user_embeddings: A tensor of shape [batch_size, embedding_dim].
movie_embeddings: A tensor of shape [num_movies, embedding_dim].
labels: A sparse tensor of dense_shape [batch_size, 1], such that
labels[i] is the target label for example i.
Returns:
The mean cross-entropy loss.
"""
# ========================= Complete this section ============================
# logits =
# loss =
# ============================================================================
return loss
# @title Solution
def softmax_loss(user_embeddings, movie_embeddings, labels):
"""Returns the cross-entropy loss of the softmax model.
Args:
user_embeddings: A tensor of shape [batch_size, embedding_dim].
movie_embeddings: A tensor of shape [num_movies, embedding_dim].
labels: A tensor of [batch_size], such that labels[i] is the target label
for example i.
Returns:
The mean cross-entropy loss.
"""
  # Verify that the embeddings have compatible dimensions.
user_emb_dim = user_embeddings.shape[1].value
movie_emb_dim = movie_embeddings.shape[1].value
if user_emb_dim != movie_emb_dim:
raise ValueError(
"The user embedding dimension %d should match the movie embedding "
"dimension % d" % (user_emb_dim, movie_emb_dim))
logits = tf.matmul(user_embeddings, movie_embeddings, transpose_b=True)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels))
return loss
###Output
_____no_output_____
###Markdown
Exercise 8: Build a softmax model, train it, and inspect its embeddings.We are now ready to build a softmax CFModel. Complete the `build_softmax_model` function in the next cell. The architecture of the model is defined in the function `create_network` and illustrated in the figure below. The input embeddings (movie_id, genre and year) are concatenated to form the input layer, then we have hidden layers with dimensions specified by the `hidden_dims` argument. Finally, the last hidden layer is multiplied by the movie embeddings to obtain the logits layer. For the target label, we will use a randomly-sampled movie_id from the list of movies the user rated.Complete the function below by creating the feature columns and embedding columns, then creating the loss tensors both for the train and test sets (using the `softmax_loss` function of the previous exercise).
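To make the shapes concrete, here is a small numpy sketch (illustrative toy values; the sizes match the hyperparameters used later in this notebook: input embeddings of size 35 + 3 + 2 = 40, one hidden layer of size 35, and 1682 movies):

```python
# Shape flow of the softmax model: concatenated inputs -> hidden layer -> logits.
batch_size, input_dim, hidden_dim, num_movies = 4, 40, 35, 1682
x = np.random.randn(batch_size, input_dim)   # concatenated input embeddings
w = np.random.randn(input_dim, hidden_dim)   # weights of the single hidden layer
psi_x = x.dot(w)                             # user embeddings, [batch_size, hidden_dim]
V = np.random.randn(num_movies, hidden_dim)  # movie embedding matrix
logits = psi_x.dot(V.T)                      # [batch_size, num_movies]
print(logits.shape)                          # (4, 1682)
```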
###Code
def build_softmax_model(rated_movies, embedding_cols, hidden_dims):
"""Builds a Softmax model for MovieLens.
Args:
    rated_movies: DataFrame of training examples.
embedding_cols: A dictionary mapping feature names (string) to embedding
column objects. This will be used in tf.feature_column.input_layer() to
create the input layer.
hidden_dims: int list of the dimensions of the hidden layers.
Returns:
A CFModel object.
"""
def create_network(features):
"""Maps input features dictionary to user embeddings.
Args:
features: A dictionary of input string tensors.
Returns:
outputs: A tensor of shape [batch_size, embedding_dim].
"""
# Create a bag-of-words embedding for each sparse feature.
inputs = tf.feature_column.input_layer(features, embedding_cols)
# Hidden layers.
input_dim = inputs.shape[1].value
for i, output_dim in enumerate(hidden_dims):
w = tf.get_variable(
"hidden%d_w_" % i, shape=[input_dim, output_dim],
initializer=tf.truncated_normal_initializer(
stddev=1./np.sqrt(output_dim))) / 10.
outputs = tf.matmul(inputs, w)
input_dim = output_dim
inputs = outputs
return outputs
train_rated_movies, test_rated_movies = split_dataframe(rated_movies)
train_batch = make_batch(train_rated_movies, 200)
test_batch = make_batch(test_rated_movies, 100)
with tf.variable_scope("model", reuse=False):
# Train
train_user_embeddings = create_network(train_batch)
train_labels = select_random(train_batch["label"])
with tf.variable_scope("model", reuse=True):
# Test
test_user_embeddings = create_network(test_batch)
test_labels = select_random(test_batch["label"])
movie_embeddings = tf.get_variable(
"input_layer/movie_id_embedding/embedding_weights")
# ========================= Complete this section ============================
# train_loss =
# test_loss =
# test_precision_at_10 =
# ============================================================================
metrics = (
{"train_loss": train_loss, "test_loss": test_loss},
{"test_precision_at_10": test_precision_at_10}
)
embeddings = {"movie_id": movie_embeddings}
return CFModel(embeddings, train_loss, metrics)
# @title Solution
def build_softmax_model(rated_movies, embedding_cols, hidden_dims):
"""Builds a Softmax model for MovieLens.
Args:
    rated_movies: DataFrame of training examples.
embedding_cols: A dictionary mapping feature names (string) to embedding
column objects. This will be used in tf.feature_column.input_layer() to
create the input layer.
hidden_dims: int list of the dimensions of the hidden layers.
Returns:
A CFModel object.
"""
def create_network(features):
"""Maps input features dictionary to user embeddings.
Args:
features: A dictionary of input string tensors.
Returns:
outputs: A tensor of shape [batch_size, embedding_dim].
"""
# Create a bag-of-words embedding for each sparse feature.
inputs = tf.feature_column.input_layer(features, embedding_cols)
# Hidden layers.
input_dim = inputs.shape[1].value
for i, output_dim in enumerate(hidden_dims):
w = tf.get_variable(
"hidden%d_w_" % i, shape=[input_dim, output_dim],
initializer=tf.truncated_normal_initializer(
stddev=1./np.sqrt(output_dim))) / 10.
outputs = tf.matmul(inputs, w)
input_dim = output_dim
inputs = outputs
return outputs
train_rated_movies, test_rated_movies = split_dataframe(rated_movies)
train_batch = make_batch(train_rated_movies, 200)
test_batch = make_batch(test_rated_movies, 100)
with tf.variable_scope("model", reuse=False):
# Train
train_user_embeddings = create_network(train_batch)
train_labels = select_random(train_batch["label"])
with tf.variable_scope("model", reuse=True):
# Test
test_user_embeddings = create_network(test_batch)
test_labels = select_random(test_batch["label"])
movie_embeddings = tf.get_variable(
"input_layer/movie_id_embedding/embedding_weights")
test_loss = softmax_loss(
test_user_embeddings, movie_embeddings, test_labels)
train_loss = softmax_loss(
train_user_embeddings, movie_embeddings, train_labels)
_, test_precision_at_10 = tf.metrics.precision_at_k(
labels=test_labels,
predictions=tf.matmul(test_user_embeddings, movie_embeddings, transpose_b=True),
k=10)
metrics = (
{"train_loss": train_loss, "test_loss": test_loss},
{"test_precision_at_10": test_precision_at_10}
)
embeddings = {"movie_id": movie_embeddings}
return CFModel(embeddings, train_loss, metrics)
###Output
_____no_output_____
###Markdown
Train the Softmax modelWe are now ready to train the softmax model. You can set the following hyperparameters:- learning rate- number of iterations. Note: you can run `softmax_model.train()` again to continue training the model from its current state.- input embedding dimensions (the `embedding_dim` argument of `make_embedding_col` in the next cell)- number of hidden layers and size of each layer (the `hidden_dims` argument)Note: since our input features are string-valued (movie_id, genre, and year), we need to map them to integer ids. This is done using [`tf.feature_column.categorical_column_with_vocabulary_list`](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_list), which takes a vocabulary list specifying all the values the feature can take. Then each id is mapped to an embedding vector using [`tf.feature_column.embedding_column`](https://www.tensorflow.org/api_docs/python/tf/feature_column/embedding_column).
###Code
# Create feature embedding columns
def make_embedding_col(key, embedding_dim):
categorical_col = tf.feature_column.categorical_column_with_vocabulary_list(
key=key, vocabulary_list=list(set(movies[key].values)), num_oov_buckets=0)
return tf.feature_column.embedding_column(
categorical_column=categorical_col, dimension=embedding_dim,
      # default initializer: truncated normal with stddev=1/sqrt(dimension)
combiner='mean')
with tf.Graph().as_default():
softmax_model = build_softmax_model(
rated_movies,
embedding_cols=[
make_embedding_col("movie_id", 35),
make_embedding_col("genre", 3),
make_embedding_col("year", 2),
],
hidden_dims=[35])
softmax_model.train(
learning_rate=8., num_iterations=3000, optimizer=tf.train.AdagradOptimizer)
###Output
_____no_output_____
###Markdown
Inspect the embeddingsWe can inspect the movie embeddings as we did for the previous models. Note that in this case, the movie embeddings are used at the same time as input embeddings (for the bag of words representation of the user history), and as softmax weights.
###Code
movie_neighbors(softmax_model, "Aladdin", DOT)
movie_neighbors(softmax_model, "Aladdin", COSINE)
movie_embedding_norm([reg_model, softmax_model])
tsne_movie_embeddings(softmax_model)
###Output
_____no_output_____
###Markdown
Copyright 2018 Google LLC.
###Code
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Recommendation Systems with TensorFlowThis Colab notebook complements the course on [Recommendation Systems](https://developers.google.com/machine-learning/recommendation/). Specifically, we'll be using matrix factorization to learn user and movie embeddings. IntroductionWe will create a movie recommendation system based on the [MovieLens](https://movielens.org/) dataset available [here](http://grouplens.org/datasets/movielens/). The data consists of movie ratings (on a scale of 1 to 5). Outline 1. Exploring the MovieLens Data (10 minutes) 1. Preliminaries (25 minutes) 1. Training a matrix factorization model (15 minutes) 1. Inspecting the Embeddings (15 minutes) 1. Regularization in matrix factorization (15 minutes) 1. Softmax model training (30 minutes) SetupLet's get started by importing the required packages.
###Code
# @title Imports (run this cell)
from __future__ import print_function
import numpy as np
import pandas as pd
import collections
from mpl_toolkits.mplot3d import Axes3D
from IPython import display
from matplotlib import pyplot as plt
import sklearn
import sklearn.manifold
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.logging.set_verbosity(tf.logging.ERROR)
# Add some convenience functions to Pandas DataFrame.
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.3f}'.format
def mask(df, key, function):
"""Returns a filtered dataframe, by applying function to key"""
return df[function(df[key])]
def flatten_cols(df):
df.columns = [' '.join(col).strip() for col in df.columns.values]
return df
pd.DataFrame.mask = mask
pd.DataFrame.flatten_cols = flatten_cols
# Install Altair and activate its colab renderer.
print("Installing Altair...")
!pip install git+git://github.com/altair-viz/altair.git
import altair as alt
alt.data_transformers.enable('default', max_rows=None)
alt.renderers.enable('colab')
print("Done installing Altair.")
# Install spreadsheets and import authentication module.
USER_RATINGS = False
!pip install --upgrade -q gspread
from google.colab import auth
import gspread
from oauth2client.client import GoogleCredentials
###Output
_____no_output_____
###Markdown
We then download the MovieLens Data, and create DataFrames containing movies, users, and ratings.
###Code
# @title Load the MovieLens data (run this cell).
# Download MovieLens data.
print("Downloading movielens data...")
from urllib.request import urlretrieve
import zipfile
urlretrieve("http://files.grouplens.org/datasets/movielens/ml-100k.zip", "movielens.zip")
zip_ref = zipfile.ZipFile('movielens.zip', "r")
zip_ref.extractall()
print("Done. Dataset contains:")
print(zip_ref.read('ml-100k/u.info'))
# Load each data set (users, movies, and ratings).
users_cols = ['user_id', 'age', 'gender', 'occupation', 'zip_code']
users = pd.read_csv(
'ml-100k/u.user', sep='|', names=users_cols, encoding='latin-1')
ratings_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp']
ratings = pd.read_csv(
'ml-100k/u.data', sep='\t', names=ratings_cols, encoding='latin-1')
# The movies file contains a binary feature for each genre.
genre_cols = [
"genre_unknown", "Action", "Adventure", "Animation", "Children", "Comedy",
"Crime", "Documentary", "Drama", "Fantasy", "Film-Noir", "Horror",
"Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western"
]
movies_cols = [
'movie_id', 'title', 'release_date', "video_release_date", "imdb_url"
] + genre_cols
movies = pd.read_csv(
'ml-100k/u.item', sep='|', names=movies_cols, encoding='latin-1')
# Since the ids start at 1, we shift them to start at 0.
users["user_id"] = users["user_id"].apply(lambda x: str(x-1))
movies["movie_id"] = movies["movie_id"].apply(lambda x: str(x-1))
movies["year"] = movies['release_date'].apply(lambda x: str(x).split('-')[-1])
ratings["movie_id"] = ratings["movie_id"].apply(lambda x: str(x-1))
ratings["user_id"] = ratings["user_id"].apply(lambda x: str(x-1))
ratings["rating"] = ratings["rating"].apply(lambda x: float(x))
# Compute the number of movies to which a genre is assigned.
genre_occurences = movies[genre_cols].sum().to_dict()
# Since some movies can belong to more than one genre, we create different
# 'genre' columns as follows:
# - all_genres: all the active genres of the movie.
# - genre: randomly sampled from the active genres.
def mark_genres(movies, genres):
def get_random_genre(gs):
active = [genre for genre, g in zip(genres, gs) if g==1]
if len(active) == 0:
return 'Other'
return np.random.choice(active)
def get_all_genres(gs):
active = [genre for genre, g in zip(genres, gs) if g==1]
if len(active) == 0:
return 'Other'
return '-'.join(active)
movies['genre'] = [
get_random_genre(gs) for gs in zip(*[movies[genre] for genre in genres])]
movies['all_genres'] = [
get_all_genres(gs) for gs in zip(*[movies[genre] for genre in genres])]
mark_genres(movies, genre_cols)
# Create one merged DataFrame containing all the movielens data.
movielens = ratings.merge(movies, on='movie_id').merge(users, on='user_id')
# Utility to split the data into training and test sets.
def split_dataframe(df, holdout_fraction=0.1):
"""Splits a DataFrame into training and test sets.
Args:
df: a dataframe.
holdout_fraction: fraction of dataframe rows to use in the test set.
Returns:
train: dataframe for training
test: dataframe for testing
"""
test = df.sample(frac=holdout_fraction, replace=False)
train = df[~df.index.isin(test.index)]
return train, test
###Output
_____no_output_____
###Markdown
I. Exploring the Movielens DataBefore we dive into model building, let's inspect our MovieLens dataset. It is usually helpful to understand the statistics of the dataset. UsersWe start by printing some basic statistics describing the numeric user features.
###Code
users.describe()
###Output
_____no_output_____
###Markdown
We can also print some basic statistics describing the categorical user features
###Code
users.describe(include=[np.object])
###Output
_____no_output_____
###Markdown
We can also create histograms to further understand the distribution of the users. We use Altair to create an interactive chart.
###Code
# @title Altair visualization code (run this cell)
# The following functions are used to generate interactive Altair charts.
# We will display histograms of the data, sliced by a given attribute.
# Create filters to be used to slice the data.
occupation_filter = alt.selection_multi(fields=["occupation"])
occupation_chart = alt.Chart().mark_bar().encode(
x="count()",
y=alt.Y("occupation:N"),
color=alt.condition(
occupation_filter,
alt.Color("occupation:N", scale=alt.Scale(scheme='category20')),
alt.value("lightgray")),
).properties(width=300, height=300, selection=occupation_filter)
# A function that generates a histogram of filtered data.
def filtered_hist(field, label, filter):
"""Creates a layered chart of histograms.
The first layer (light gray) contains the histogram of the full data, and the
second contains the histogram of the filtered data.
Args:
field: the field for which to generate the histogram.
label: String label of the histogram.
filter: an alt.Selection object to be used to filter the data.
"""
base = alt.Chart().mark_bar().encode(
x=alt.X(field, bin=alt.Bin(maxbins=10), title=label),
y="count()",
).properties(
width=300,
)
return alt.layer(
base.transform_filter(filter),
base.encode(color=alt.value('lightgray'), opacity=alt.value(.7)),
).resolve_scale(y='independent')
###Output
_____no_output_____
###Markdown
Next, we look at the distribution of ratings per user. Clicking on an occupation in the right chart will filter the data by that occupation. The corresponding histogram is shown in blue, and superimposed with the histogram for the whole data (in light gray). You can use SHIFT+click to select multiple subsets.What do you observe, and how might this affect the recommendations?
###Code
users_ratings = (
ratings
.groupby('user_id', as_index=False)
.agg({'rating': ['count', 'mean']})
.flatten_cols()
.merge(users, on='user_id')
)
# Create a chart for the count, and one for the mean.
alt.hconcat(
filtered_hist('rating count', '# ratings / user', occupation_filter),
filtered_hist('rating mean', 'mean user rating', occupation_filter),
occupation_chart,
data=users_ratings)
###Output
_____no_output_____
###Markdown
MoviesIt is also useful to look at information about the movies and their ratings.
###Code
movies_ratings = movies.merge(
ratings
.groupby('movie_id', as_index=False)
.agg({'rating': ['count', 'mean']})
.flatten_cols(),
on='movie_id')
genre_filter = alt.selection_multi(fields=['genre'])
genre_chart = alt.Chart().mark_bar().encode(
x="count()",
y=alt.Y('genre'),
color=alt.condition(
genre_filter,
alt.Color("genre:N"),
alt.value('lightgray'))
).properties(height=300, selection=genre_filter)
(movies_ratings[['title', 'rating count', 'rating mean']]
.sort_values('rating count', ascending=False)
.head(10))
(movies_ratings[['title', 'rating count', 'rating mean']]
.mask('rating count', lambda x: x > 20)
.sort_values('rating mean', ascending=False)
.head(10))
###Output
_____no_output_____
###Markdown
Finally, the last chart shows the distribution of the number of ratings and average rating.
###Code
# Display the number of ratings and average rating per movie.
alt.hconcat(
filtered_hist('rating count', '# ratings / movie', genre_filter),
filtered_hist('rating mean', 'mean movie rating', genre_filter),
genre_chart,
data=movies_ratings)
###Output
_____no_output_____
###Markdown
II. PreliminariesOur goal is to factorize the ratings matrix $A$ into the product of a user embedding matrix $U$ and movie embedding matrix $V$, such that $A \approx UV^\top$ with$U = \begin{bmatrix} u_{1} \\ \hline \vdots \\ \hline u_{N} \end{bmatrix}$ and$V = \begin{bmatrix} v_{1} \\ \hline \vdots \\ \hline v_{M} \end{bmatrix}$.Here- $N$ is the number of users,- $M$ is the number of movies,- $A_{ij}$ is the rating of the $j$th movie by the $i$th user,- each row $U_i$ is a $d$-dimensional vector (embedding) representing user $i$,- each row $V_j$ is a $d$-dimensional vector (embedding) representing movie $j$,- the prediction of the model for the $(i, j)$ pair is the dot product $\langle U_i, V_j \rangle$. Sparse Representation of the Rating MatrixThe rating matrix could be very large and, in general, most of the entries are unobserved, since a given user will only rate a small subset of movies. For efficient representation, we will use a [tf.SparseTensor](https://www.tensorflow.org/api_docs/python/tf/SparseTensor). A `SparseTensor` uses three tensors to represent the matrix: `tf.SparseTensor(indices, values, dense_shape)` represents a tensor, where a value $A_{ij} = a$ is encoded by setting `indices[k] = [i, j]` and `values[k] = a`. The last tensor `dense_shape` is used to specify the shape of the full underlying matrix. Toy exampleAssume we have $2$ users and $4$ movies. Our toy ratings dataframe has three ratings:

user\_id | movie\_id | rating
--:|--:|--:
0 | 0 | 5.0
0 | 1 | 3.0
1 | 3 | 1.0

The corresponding rating matrix is$$A =\begin{bmatrix}5.0 & 3.0 & 0 & 0 \\0 & 0 & 0 & 1.0\end{bmatrix}$$And the SparseTensor representation is,

```python
SparseTensor(
    indices=[[0, 0], [0, 1], [1, 3]],
    values=[5.0, 3.0, 1.0],
    dense_shape=[2, 4])
```

Exercise 1: Build a tf.SparseTensor representation of the Rating Matrix.In this exercise, we'll write a function that maps from our `ratings` DataFrame to a `tf.SparseTensor`.Hint: you can select the values of a given column of a DataFrame `df` using `df['column_name'].values`.
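Before doing the exercise, the small snippet below (an illustrative check, using the TF1-style session already used elsewhere in this notebook) materializes the toy `SparseTensor` from the example above back into the dense matrix $A$:

```python
# Densify the toy SparseTensor to recover the 2x4 rating matrix A.
toy = tf.SparseTensor(
    indices=[[0, 0], [0, 1], [1, 3]],
    values=[5.0, 3.0, 1.0],
    dense_shape=[2, 4])
with tf.Session() as sess:
  print(sess.run(tf.sparse.to_dense(toy)))
# [[5. 3. 0. 0.]
#  [0. 0. 0. 1.]]
```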
###Code
def build_rating_sparse_tensor(ratings_df):
"""
Args:
ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns.
Returns:
A tf.SparseTensor representing the ratings matrix.
"""
# ========================= Complete this section ============================
# indices =
# values =
# ============================================================================
return tf.SparseTensor(
indices=indices,
values=values,
dense_shape=[users.shape[0], movies.shape[0]])
#@title Solution
def build_rating_sparse_tensor(ratings_df):
"""
Args:
ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns.
Returns:
a tf.SparseTensor representing the ratings matrix.
"""
indices = ratings_df[['user_id', 'movie_id']].values
values = ratings_df['rating'].values
return tf.SparseTensor(
indices=indices,
values=values,
dense_shape=[users.shape[0], movies.shape[0]])
###Output
_____no_output_____
###Markdown
Calculating the errorThe model approximates the ratings matrix $A$ by a low-rank product $UV^\top$. We need a way to measure the approximation error. We'll start by using the Mean Squared Error of observed entries only (we will revisit this later). It is defined as$$\begin{align*}\text{MSE}(A, UV^\top)&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - (UV^\top)_{ij})^2} \\&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - \langle U_i, V_j\rangle)^2}\end{align*}$$where $\Omega$ is the set of observed ratings, and $|\Omega|$ is the cardinality of $\Omega$. Exercise 2: Mean Squared ErrorWrite a TensorFlow function that takes a sparse rating matrix $A$ and the two embedding matrices $U, V$ and returns the mean squared error $\text{MSE}(A, UV^\top)$.Hints: * in this section, we only consider observed entries when calculating the loss. * a `SparseTensor` `sp_x` is a tuple of three Tensors: `sp_x.indices`, `sp_x.values` and `sp_x.dense_shape`. * you may find [`tf.gather_nd`](https://www.tensorflow.org/api_docs/python/tf/gather_nd) and [`tf.losses.mean_squared_error`](https://www.tensorflow.org/api_docs/python/tf/losses/mean_squared_error) helpful.
###Code
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
# ========================= Complete this section ============================
# loss =
# ============================================================================
return loss
#@title Solution
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
predictions = tf.gather_nd(
tf.matmul(user_embeddings, movie_embeddings, transpose_b=True),
sparse_ratings.indices)
loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions)
return loss
###Output
_____no_output_____
###Markdown
Note: One approach is to compute the full prediction matrix $UV^\top$, then gather the entries corresponding to the observed pairs. The memory cost of this approach is $O(NM)$. For the MovieLens dataset, this is fine, as the dense $N \times M$ matrix is small enough to fit in memory ($N = 943$, $M = 1682$).Another approach (given in the alternate solution below) is to only gather the embeddings of the observed pairs, then compute their dot products. The memory cost is $O(|\Omega| d)$ where $d$ is the embedding dimension. In our case, $|\Omega| = 10^5$, and the embedding dimension is on the order of $10$, so the memory cost of both methods is comparable. But when the number of users or movies is much larger, the first approach becomes infeasible.
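For concreteness (a rough back-of-the-envelope estimate added here, not part of the original text): the dense product holds $N \times M = 943 \times 1682 \approx 1.6 \times 10^6$ entries, while the gather-then-dot-product approach touches about $|\Omega| \, d \approx 10^5 \times 10 = 10^6$ numbers, so the two costs are indeed of the same order for this dataset.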
###Code
#@title Alternate Solution
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
predictions = tf.reduce_sum(
tf.gather(user_embeddings, sparse_ratings.indices[:, 0]) *
tf.gather(movie_embeddings, sparse_ratings.indices[:, 1]),
axis=1)
loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions)
return loss
###Output
_____no_output_____
###Markdown
Exercise 3 (Optional): adding your own ratings to the data set You have the option to add your own ratings to the data set. If you choose to do so, you will be able to see recommendations for yourself.Start by checking the box below. Running the next cell will authenticate you to your Google Drive account, and create a spreadsheet that contains all movie titles in column 'A'. Follow the link to the spreadsheet and take 3 minutes to rate some of the movies. Your ratings should be entered in column 'B'.
###Code
USER_RATINGS = True #@param {type:"boolean"}
# @title Run to create a spreadsheet, then use it to enter your ratings.
# Authenticate user.
if USER_RATINGS:
auth.authenticate_user()
gc = gspread.authorize(GoogleCredentials.get_application_default())
# Create the spreadsheet and print a link to it.
try:
sh = gc.open('MovieLens-test')
except(gspread.SpreadsheetNotFound):
sh = gc.create('MovieLens-test')
worksheet = sh.sheet1
titles = movies['title'].values
cell_list = worksheet.range(1, 1, len(titles), 1)
for cell, title in zip(cell_list, titles):
cell.value = title
worksheet.update_cells(cell_list)
print("Link to the spreadsheet: "
"https://docs.google.com/spreadsheets/d/{}/edit".format(sh.id))
###Output
_____no_output_____
###Markdown
Run the next cell to load your ratings and add them to the main `ratings` DataFrame.
###Code
# @title Run to load your ratings.
# Load the ratings from the spreadsheet and create a DataFrame.
if USER_RATINGS:
my_ratings = pd.DataFrame.from_records(worksheet.get_all_values()).reset_index()
my_ratings = my_ratings[my_ratings[1] != '']
my_ratings = pd.DataFrame({
'user_id': "943",
'movie_id': list(map(str, my_ratings['index'])),
'rating': list(map(float, my_ratings[1])),
})
# Remove previous ratings.
ratings = ratings[ratings.user_id != "943"]
# Add new ratings.
ratings = ratings.append(my_ratings, ignore_index=True)
# Add new user to the users DataFrame.
if users.shape[0] == 943:
users = users.append(users.iloc[942], ignore_index=True)
users["user_id"][943] = "943"
print("Added your %d ratings; you have great taste!" % len(my_ratings))
ratings[ratings.user_id=="943"].merge(movies[['movie_id', 'title']])
###Output
_____no_output_____
###Markdown
III. Training a Matrix Factorization model CFModel (Collaborative Filtering Model) helper classThis is a simple class to train a matrix factorization model using stochastic gradient descent.The class constructor takes- a dictionary of embedding variables (e.g. the user embeddings U and the movie embeddings V, each a `tf.Variable`).- a loss to optimize (a `tf.Tensor`).- an optional list of metrics dictionaries, each mapping a string (the name of the metric) to a tensor. These are evaluated and plotted during training (e.g. training error and test error).After training, one can access the trained embeddings using the `model.embeddings` dictionary.Example usage:

```python
U_var = ...
V_var = ...
loss = ...
model = CFModel({'user_id': U_var, 'movie_id': V_var}, loss)
model.train(num_iterations=100, learning_rate=1.0)
user_embeddings = model.embeddings['user_id']
movie_embeddings = model.embeddings['movie_id']
```
###Code
# @title CFModel helper class (run this cell)
class CFModel(object):
"""Simple class that represents a collaborative filtering model"""
def __init__(self, embedding_vars, loss, metrics=None):
"""Initializes a CFModel.
Args:
embedding_vars: A dictionary of tf.Variables.
loss: A float Tensor. The loss to optimize.
metrics: optional list of dictionaries of Tensors. The metrics in each
dictionary will be plotted in a separate figure during training.
"""
self._embedding_vars = embedding_vars
self._loss = loss
self._metrics = metrics
self._embeddings = {k: None for k in embedding_vars}
self._session = None
@property
def embeddings(self):
"""The embeddings dictionary."""
return self._embeddings
def train(self, num_iterations=100, learning_rate=1.0, plot_results=True,
optimizer=tf.train.GradientDescentOptimizer):
"""Trains the model.
Args:
      num_iterations: number of iterations to run.
learning_rate: optimizer learning rate.
plot_results: whether to plot the results at the end of training.
optimizer: the optimizer to use. Default to GradientDescentOptimizer.
Returns:
The metrics dictionary evaluated at the last iteration.
"""
with self._loss.graph.as_default():
opt = optimizer(learning_rate)
train_op = opt.minimize(self._loss)
local_init_op = tf.group(
tf.variables_initializer(opt.variables()),
tf.local_variables_initializer())
if self._session is None:
self._session = tf.Session()
with self._session.as_default():
self._session.run(tf.global_variables_initializer())
self._session.run(tf.tables_initializer())
tf.train.start_queue_runners()
with self._session.as_default():
local_init_op.run()
iterations = []
metrics = self._metrics or ({},)
metrics_vals = [collections.defaultdict(list) for _ in self._metrics]
# Train and append results.
for i in range(num_iterations + 1):
_, results = self._session.run((train_op, metrics))
if (i % 10 == 0) or i == num_iterations:
print("\r iteration %d: " % i + ", ".join(
["%s=%f" % (k, v) for r in results for k, v in r.items()]),
end='')
iterations.append(i)
for metric_val, result in zip(metrics_vals, results):
for k, v in result.items():
metric_val[k].append(v)
for k, v in self._embedding_vars.items():
self._embeddings[k] = v.eval()
if plot_results:
# Plot the metrics.
num_subplots = len(metrics)+1
fig = plt.figure()
fig.set_size_inches(num_subplots*10, 8)
for i, metric_vals in enumerate(metrics_vals):
ax = fig.add_subplot(1, num_subplots, i+1)
for k, v in metric_vals.items():
ax.plot(iterations, v, label=k)
ax.set_xlim([1, num_iterations])
ax.legend()
return results
###Output
_____no_output_____
###Markdown
Exercise 4: Build a Matrix Factorization model and train itUsing your `sparse_mean_square_error` function, write a function that builds a `CFModel` by creating the embedding variables and the train and test losses.
###Code
def build_model(ratings, embedding_dim=3, init_stddev=1.):
"""
Args:
ratings: a DataFrame of the ratings
embedding_dim: the dimension of the embedding vectors.
init_stddev: float, the standard deviation of the random initial embeddings.
Returns:
model: a CFModel.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
# ========================= Complete this section ============================
# A_train =
# A_test =
# ============================================================================
# Initialize the embeddings using a normal distribution.
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
# ========================= Complete this section ============================
# train_loss =
# test_loss =
# ============================================================================
metrics = {
'train_error': train_loss,
'test_error': test_loss
}
embeddings = {
"user_id": U,
"movie_id": V
}
return CFModel(embeddings, train_loss, [metrics])
#@title Solution
def build_model(ratings, embedding_dim=3, init_stddev=1.):
"""
Args:
ratings: a DataFrame of the ratings
embedding_dim: the dimension of the embedding vectors.
init_stddev: float, the standard deviation of the random initial embeddings.
Returns:
model: a CFModel.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
A_train = build_rating_sparse_tensor(train_ratings)
A_test = build_rating_sparse_tensor(test_ratings)
# Initialize the embeddings using a normal distribution.
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
train_loss = sparse_mean_square_error(A_train, U, V)
test_loss = sparse_mean_square_error(A_test, U, V)
metrics = {
'train_error': train_loss,
'test_error': test_loss
}
embeddings = {
"user_id": U,
"movie_id": V
}
return CFModel(embeddings, train_loss, [metrics])
###Output
_____no_output_____
###Markdown
Great, now it's time to train the model!Go ahead and run the next cell, trying different parameters (embedding dimension, learning rate, iterations). The training and test errors are plotted at the end of training. You can inspect these values to validate the hyper-parameters.Note: by calling `model.train` again, the model will continue training starting from the current values of the embeddings.
###Code
# Build the CF model and train it.
model = build_model(ratings, embedding_dim=30, init_stddev=0.5)
model.train(num_iterations=1000, learning_rate=10.)
###Output
_____no_output_____
###Markdown
The movie and user embeddings are also displayed in the right figure. When the embedding dimension is greater than 3, the embeddings are projected on the first 3 dimensions. The next section will have a more detailed look at the embeddings. IV. Inspecting the EmbeddingsIn this section, we take a closer look at the learned embeddings, by- computing your recommendations- looking at the nearest neighbors of some movies,- looking at the norms of the movie embeddings,- visualizing the embedding in a projected embedding space. Exercise 5: Write a function that computes the scores of the candidatesWe start by writing a function that, given a query embedding $u \in \mathbb R^d$ and item embeddings $V \in \mathbb R^{N \times d}$, computes the item scores.As discussed in the lecture, there are different similarity measures we can use, and these can yield different results. We will compare the following:- dot product: the score of item j is $\langle u, V_j \rangle$.- cosine: the score of item j is $\frac{\langle u, V_j \rangle}{\|u\|\|V_j\|}$.Hints:- you can use [`np.dot`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html) to compute the product of two np.Arrays.- you can use [`np.linalg.norm`](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.norm.html) to compute the norm of a np.Array.
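Before completing the exercise, here is a toy numpy example (illustrative values only) of how the two measures can disagree: the dot product favors the item with the larger norm, while cosine favors the item whose direction is closest to the query:

```python
# Two items scored against one query: dot product vs. cosine similarity.
u_demo = np.array([1.0, 2.0])                 # query embedding
V_demo = np.array([[3.0, 0.0],                # item 0: large norm, different direction
                   [0.5, 1.0]])               # item 1: small norm, same direction as u
print(u_demo.dot(V_demo.T))                   # dot scores ~ [3.0, 2.5]  -> prefers item 0
V_unit = V_demo / np.linalg.norm(V_demo, axis=1, keepdims=True)
u_unit = u_demo / np.linalg.norm(u_demo)
print(u_unit.dot(V_unit.T))                   # cosine scores ~ [0.45, 1.0] -> prefers item 1
```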
###Code
DOT = 'dot'
COSINE = 'cosine'
def compute_scores(query_embedding, item_embeddings, measure=DOT):
"""Computes the scores of the candidates given a query.
Args:
query_embedding: a vector of shape [k], representing the query embedding.
item_embeddings: a matrix of shape [N, k], such that row i is the embedding
of item i.
measure: a string specifying the similarity measure to be used. Can be
either DOT or COSINE.
Returns:
scores: a vector of shape [N], such that scores[i] is the score of item i.
"""
# ========================= Complete this section ============================
# scores =
# ============================================================================
return scores
#@title Solution
DOT = 'dot'
COSINE = 'cosine'
def compute_scores(query_embedding, item_embeddings, measure=DOT):
"""Computes the scores of the candidates given a query.
Args:
query_embedding: a vector of shape [k], representing the query embedding.
item_embeddings: a matrix of shape [N, k], such that row i is the embedding
of item i.
measure: a string specifying the similarity measure to be used. Can be
either DOT or COSINE.
Returns:
scores: a vector of shape [N], such that scores[i] is the score of item i.
"""
u = query_embedding
V = item_embeddings
if measure == COSINE:
V = V / np.linalg.norm(V, axis=1, keepdims=True)
u = u / np.linalg.norm(u)
scores = u.dot(V.T)
return scores
###Output
_____no_output_____
###Markdown
Equipped with this function, we can compute recommendations, where the query embedding can be either a user embedding or a movie embedding.
###Code
# @title User recommendations and nearest neighbors (run this cell)
def user_recommendations(model, measure=DOT, exclude_rated=False, k=6):
if USER_RATINGS:
scores = compute_scores(
model.embeddings["user_id"][943], model.embeddings["movie_id"], measure)
score_key = measure + ' score'
df = pd.DataFrame({
score_key: list(scores),
'movie_id': movies['movie_id'],
'titles': movies['title'],
'genres': movies['all_genres'],
})
if exclude_rated:
# remove movies that are already rated
rated_movies = ratings[ratings.user_id == "943"]["movie_id"].values
df = df[df.movie_id.apply(lambda movie_id: movie_id not in rated_movies)]
display.display(df.sort_values([score_key], ascending=False).head(k))
def movie_neighbors(model, title_substring, measure=DOT, k=6):
# Search for movie ids that match the given substring.
ids = movies[movies['title'].str.contains(title_substring)].index.values
titles = movies.iloc[ids]['title'].values
if len(titles) == 0:
raise ValueError("Found no movies with title %s" % title_substring)
print("Nearest neighbors of : %s." % titles[0])
if len(titles) > 1:
print("[Found more than one matching movie. Other candidates: {}]".format(
", ".join(titles[1:])))
movie_id = ids[0]
scores = compute_scores(
model.embeddings["movie_id"][movie_id], model.embeddings["movie_id"],
measure)
score_key = measure + ' score'
df = pd.DataFrame({
score_key: list(scores),
'titles': movies['title'],
'genres': movies['all_genres']
})
display.display(df.sort_values([score_key], ascending=False).head(k))
###Output
_____no_output_____
###Markdown
Your recommendationsIf you chose to input your own ratings, you can run the next cell to generate recommendations for you.
###Code
user_recommendations(model, measure=COSINE, k=5)
###Output
_____no_output_____
###Markdown
How do the recommendations look? Movie Nearest neighborsLet's look at the nearest neighbors for some of the movies.
###Code
movie_neighbors(model, "Aladdin", DOT)
movie_neighbors(model, "Aladdin", COSINE)
###Output
_____no_output_____
###Markdown
It seems that the quality of learned embeddings may not be very good. This will be addressed in Section V by adding several regularization techniques. First, we will further inspect the embeddings. Movie Embedding NormWe can also observe that the recommendations with dot-product and cosine are different: with dot-product, the model tends to recommend popular movies. This can be explained by the fact that in matrix factorization models, the norm of the embedding is often correlated with popularity (popular movies have a larger norm), which makes it more likely to recommend more popular items. We can confirm this hypothesis by sorting the movies by their embedding norm, as done in the next cell.
###Code
# @title Embedding Visualization code (run this cell)
def movie_embedding_norm(models):
"""Visualizes the norm and number of ratings of the movie embeddings.
Args:
    models: A CFModel object, or a list of CFModel objects.
"""
if not isinstance(models, list):
models = [models]
df = pd.DataFrame({
'title': movies['title'],
'genre': movies['genre'],
'num_ratings': movies_ratings['rating count'],
})
charts = []
brush = alt.selection_interval()
for i, model in enumerate(models):
norm_key = 'norm'+str(i)
df[norm_key] = np.linalg.norm(model.embeddings["movie_id"], axis=1)
nearest = alt.selection(
type='single', encodings=['x', 'y'], on='mouseover', nearest=True,
empty='none')
base = alt.Chart().mark_circle().encode(
x='num_ratings',
y=norm_key,
color=alt.condition(brush, alt.value('#4c78a8'), alt.value('lightgray'))
).properties(
selection=nearest).add_selection(brush)
text = alt.Chart().mark_text(align='center', dx=5, dy=-5).encode(
x='num_ratings', y=norm_key,
text=alt.condition(nearest, 'title', alt.value('')))
charts.append(alt.layer(base, text))
return alt.hconcat(*charts, data=df)
def visualize_movie_embeddings(data, x, y):
nearest = alt.selection(
type='single', encodings=['x', 'y'], on='mouseover', nearest=True,
empty='none')
base = alt.Chart().mark_circle().encode(
x=x,
y=y,
color=alt.condition(genre_filter, "genre", alt.value("whitesmoke")),
).properties(
width=600,
height=600,
selection=nearest)
text = alt.Chart().mark_text(align='left', dx=5, dy=-5).encode(
x=x,
y=y,
text=alt.condition(nearest, 'title', alt.value('')))
return alt.hconcat(alt.layer(base, text), genre_chart, data=data)
def tsne_movie_embeddings(model):
"""Visualizes the movie embeddings, projected using t-SNE with Cosine measure.
Args:
    model: A CFModel object.
"""
tsne = sklearn.manifold.TSNE(
n_components=2, perplexity=40, metric='cosine', early_exaggeration=10.0,
init='pca', verbose=True, n_iter=400)
print('Running t-SNE...')
V_proj = tsne.fit_transform(model.embeddings["movie_id"])
movies.loc[:,'x'] = V_proj[:, 0]
movies.loc[:,'y'] = V_proj[:, 1]
return visualize_movie_embeddings(movies, 'x', 'y')
movie_embedding_norm(model)
###Output
_____no_output_____
###Markdown
Note: Depending on how the model is initialized, you may observe that some niche movies (ones with few ratings) have a high norm, leading to spurious recommendations. This can happen if the embedding of that movie happens to be initialized with a high norm. Then, because the movie has few ratings, it is infrequently updated, and can keep its high norm. This will be alleviated by using regularization. Try changing the value of the hyper-parameter `init_stddev`. One useful fact is that the expected norm of a $d$-dimensional vector with entries $\sim \mathcal N(0, \sigma^2)$ is approximately $\sigma \sqrt d$. How does this affect the embedding norm distribution, and the ranking of the top-norm movies?
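For instance, a quick standalone check of that rule of thumb (plain numpy, not part of the exercise code): with $d = 30$, `init_stddev=0.5` gives an expected norm of roughly $0.5\sqrt{30} \approx 2.7$, while `init_stddev=0.05` gives roughly $0.27$.
```python
d = 30
for stddev in (0.5, 0.05):
  samples = np.random.normal(0., stddev, size=(10000, d))
  print("stddev=%.2f  predicted norm ~ %.2f  empirical mean norm = %.2f" % (
      stddev, stddev * np.sqrt(d), np.linalg.norm(samples, axis=1).mean()))
```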
###Code
#@title Solution
model_lowinit = build_model(ratings, embedding_dim=30, init_stddev=0.05)
model_lowinit.train(num_iterations=1000, learning_rate=10.)
movie_neighbors(model_lowinit, "Aladdin", DOT)
movie_neighbors(model_lowinit, "Aladdin", COSINE)
movie_embedding_norm([model, model_lowinit])
###Output
_____no_output_____
###Markdown
Embedding visualization: Since it is hard to visualize embeddings in a higher-dimensional space (when the embedding dimension $k > 3$), one approach is to project the embeddings to a lower-dimensional space. t-SNE (t-distributed Stochastic Neighbor Embedding) is an algorithm that projects the embeddings while attempting to preserve their pairwise distances. It can be useful for visualization, but one should use it with care. For more information on using t-SNE, see [How to Use t-SNE Effectively](https://distill.pub/2016/misread-tsne/).
###Code
tsne_movie_embeddings(model_lowinit)
###Output
_____no_output_____
###Markdown
You can highlight the embeddings of a given genre by clicking on the genres panel (SHIFT+click to select multiple genres). We can observe that the embeddings do not seem to have any notable structure, and the embeddings of a given genre are located all over the embedding space. This confirms the poor quality of the learned embeddings. One of the main reasons, which we will address in the next section, is that we only trained the model on observed pairs, and without regularization. V. Regularization In Matrix Factorization: In the previous section, our loss was defined as the mean squared error on the observed part of the rating matrix. As discussed in the lecture, this can be problematic as the model does not learn how to place the embeddings of irrelevant movies. This phenomenon is known as *folding*. We will add regularization terms that will address this issue. We will use two types of regularization: - Regularization of the model parameters. This is a common $\ell_2$ regularization term on the embedding matrices, given by $r(U, V) = \frac{1}{N} \sum_i \|U_i\|^2 + \frac{1}{M}\sum_j \|V_j\|^2$. - A global prior that pushes the prediction of any pair towards zero, called the *gravity* term. This is given by $g(U, V) = \frac{1}{MN} \sum_{i = 1}^N \sum_{j = 1}^M \langle U_i, V_j \rangle^2$. The total loss is then given by $$\frac{1}{|\Omega|}\sum_{(i, j) \in \Omega} (A_{ij} - \langle U_i, V_j\rangle)^2 + \lambda_r r(U, V) + \lambda_g g(U, V)$$ where $\lambda_r$ and $\lambda_g$ are two regularization coefficients (hyper-parameters). Exercise 6: Build a regularized Matrix Factorization model and train it: Write a function that builds a regularized model. You are given a function `gravity(U, V)` that computes the gravity term given the two embedding matrices $U$ and $V$.
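A side note on the gravity term: since $\sum_{i,j} \langle U_i, V_j \rangle^2 = \|UV^\top\|_F^2 = \mathrm{tr}\big((U^\top U)(V^\top V)\big)$, it can be computed from the two small $d \times d$ Gram matrices instead of materializing the full $N \times M$ prediction matrix, which is exactly what the `gravity` function below does. A small numpy check of that identity (illustrative only, with arbitrary dimensions):
```python
N, M, d = 50, 80, 3
U = np.random.randn(N, d)
V = np.random.randn(M, d)
# Naive computation: build the full N x M prediction matrix.
naive = np.sum(U.dot(V.T) ** 2) / (N * M)
# Equivalent computation via the d x d Gram matrices U^T U and V^T V.
via_gram = np.sum(U.T.dot(U) * V.T.dot(V)) / (N * M)
print(np.allclose(naive, via_gram))  # True
```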
###Code
def gravity(U, V):
"""Creates a gravity loss given two embedding matrices."""
return 1. / (U.shape[0].value*V.shape[0].value) * tf.reduce_sum(
tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True))
def build_regularized_model(
ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1.,
init_stddev=0.1):
"""
Args:
ratings: the DataFrame of movie ratings.
embedding_dim: The dimension of the embedding space.
regularization_coeff: The regularization coefficient lambda.
gravity_coeff: The gravity regularization coefficient lambda_g.
Returns:
A CFModel object that uses a regularized loss.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
A_train = build_rating_sparse_tensor(train_ratings)
A_test = build_rating_sparse_tensor(test_ratings)
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
# ========================= Complete this section ============================
# error_train =
# error_test =
# gravity_loss =
# regularization_loss =
# ============================================================================
total_loss = error_train + regularization_loss + gravity_loss
losses = {
'train_error': error_train,
'test_error': error_test,
}
loss_components = {
'observed_loss': error_train,
'regularization_loss': regularization_loss,
'gravity_loss': gravity_loss,
}
embeddings = {"user_id": U, "movie_id": V}
return CFModel(embeddings, total_loss, [losses, loss_components])
# @title Solution
def gravity(U, V):
"""Creates a gravity loss given two embedding matrices."""
return 1. / (U.shape[0].value*V.shape[0].value) * tf.reduce_sum(
tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True))
def build_regularized_model(
ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1.,
init_stddev=0.1):
"""
Args:
ratings: the DataFrame of movie ratings.
embedding_dim: The dimension of the embedding space.
regularization_coeff: The regularization coefficient lambda.
gravity_coeff: The gravity regularization coefficient lambda_g.
Returns:
A CFModel object that uses a regularized loss.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
A_train = build_rating_sparse_tensor(train_ratings)
A_test = build_rating_sparse_tensor(test_ratings)
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
error_train = sparse_mean_square_error(A_train, U, V)
error_test = sparse_mean_square_error(A_test, U, V)
gravity_loss = gravity_coeff * gravity(U, V)
regularization_loss = regularization_coeff * (
tf.reduce_sum(U*U)/U.shape[0].value + tf.reduce_sum(V*V)/V.shape[0].value)
total_loss = error_train + regularization_loss + gravity_loss
losses = {
'train_error_observed': error_train,
'test_error_observed': error_test,
}
loss_components = {
'observed_loss': error_train,
'regularization_loss': regularization_loss,
'gravity_loss': gravity_loss,
}
embeddings = {"user_id": U, "movie_id": V}
return CFModel(embeddings, total_loss, [losses, loss_components])
###Output
_____no_output_____
###Markdown
It is now time to train the regularized model! You can try different values of the regularization coefficients, and different embedding dimensions.
###Code
reg_model = build_regularized_model(
ratings, regularization_coeff=0.1, gravity_coeff=1.0, embedding_dim=35,
init_stddev=.05)
reg_model.train(num_iterations=2000, learning_rate=20.)
###Output
_____no_output_____
###Markdown
Observe that adding the regularization terms results in a higher MSE, both on the training and test set. However, as we will see, the quality of the recommendations improves. This highlights a tension between fitting the observed data and minimizing the regularization terms. Fitting the observed data often emphasizes learning high similarity (between items with many interactions), but a good embedding representation also requires learning low similarity (between items with few or no interactions). Inspect the results: Let's see if the results with regularization look better.
###Code
user_recommendations(reg_model, DOT, exclude_rated=True, k=10)
###Output
_____no_output_____
###Markdown
Hopefully, these recommendations look better. You can change the similarity measure from COSINE to DOT and observe how this affects the recommendations. Since the model is likely to recommend items that you rated highly, you have the option to exclude the items you rated, using `exclude_rated=True`. In the following cells, we display the nearest neighbors, the embedding norms, and the t-SNE projection of the movie embeddings.
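For example, to see your own cosine-based recommendations with already-rated movies excluded (using the same `user_recommendations` helper defined earlier, nothing new assumed):
```python
user_recommendations(reg_model, COSINE, exclude_rated=True, k=10)
```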
###Code
movie_neighbors(reg_model, "Aladdin", DOT)
movie_neighbors(reg_model, "Aladdin", COSINE)
###Output
_____no_output_____
###Markdown
Here we compare the embedding norms for `model` and `reg_model`. Selecting a subset of the embeddings will highlight them on both charts simultaneously.
###Code
movie_embedding_norm([model, model_lowinit, reg_model])
# Visualize the embeddings
tsne_movie_embeddings(reg_model)
###Output
_____no_output_____
###Markdown
We should observe that the embeddings have a lot more structure than in the unregularized case. Try selecting different genres and observe how they tend to form clusters (for example Horror, Animation and Children). Conclusion: This concludes this section on matrix factorization models. Note that while the scale of the problem is small enough to allow efficient training using SGD, many practical problems need to be trained using more specialized algorithms such as Alternating Least Squares (see [tf.contrib.factorization.WALSMatrixFactorization](https://www.tensorflow.org/api_docs/python/tf/contrib/factorization/WALSMatrixFactorization) for a TF implementation). VI. Softmax model: In this section, we will train a simple softmax model that predicts whether a given user has rated a movie. **Note**: if you are taking the self-study version of the class, make sure to read through the part of the class covering Softmax training before working on this part. The model will take as input a feature vector $x$ representing the list of movies the user has rated. We start from the ratings DataFrame, which we group by user_id.
###Code
rated_movies = (ratings[["user_id", "movie_id"]]
.groupby("user_id", as_index=False)
.aggregate(lambda x: list(x)))
rated_movies.head()
###Output
_____no_output_____
###Markdown
We then create a function that generates an example batch, such that each example contains the following features: - movie_id: A tensor of strings of the movie ids that the user rated. - genre: A tensor of strings of the genres of those movies. - year: A tensor of strings of the release years.
###Code
#@title Batch generation code (run this cell)
years_dict = {
movie: year for movie, year in zip(movies["movie_id"], movies["year"])
}
genres_dict = {
movie: genres.split('-')
for movie, genres in zip(movies["movie_id"], movies["all_genres"])
}
def make_batch(ratings, batch_size):
"""Creates a batch of examples.
Args:
ratings: A DataFrame of ratings such that examples["movie_id"] is a list of
movies rated by a user.
batch_size: The batch size.
"""
def pad(x, fill):
return pd.DataFrame.from_dict(x).fillna(fill).values
movie = []
year = []
genre = []
label = []
for movie_ids in ratings["movie_id"].values:
movie.append(movie_ids)
genre.append([x for movie_id in movie_ids for x in genres_dict[movie_id]])
year.append([years_dict[movie_id] for movie_id in movie_ids])
label.append([int(movie_id) for movie_id in movie_ids])
features = {
"movie_id": pad(movie, ""),
"year": pad(year, ""),
"genre": pad(genre, ""),
"label": pad(label, -1)
}
batch = (
tf.data.Dataset.from_tensor_slices(features)
.shuffle(1000)
.repeat()
.batch(batch_size)
.make_one_shot_iterator()
.get_next())
return batch
def select_random(x):
"""Selectes a random elements from each row of x."""
def to_float(x):
return tf.cast(x, tf.float32)
def to_int(x):
return tf.cast(x, tf.int64)
batch_size = tf.shape(x)[0]
rn = tf.range(batch_size)
nnz = to_float(tf.count_nonzero(x >= 0, axis=1))
rnd = tf.random_uniform([batch_size])
ids = tf.stack([to_int(rn), to_int(nnz * rnd)], axis=1)
return to_int(tf.gather_nd(x, ids))
###Output
_____no_output_____
###Markdown
Loss function: Recall that the softmax model maps the input features $x$ to a user embedding $\psi(x) \in \mathbb R^d$, where $d$ is the embedding dimension. This vector is then multiplied by a movie embedding matrix $V \in \mathbb R^{m \times d}$ (where $m$ is the number of movies), and the final output of the model is the softmax of the product $$\hat p(x) = \text{softmax}(\psi(x) V^\top).$$ Given a target label $y$, if we denote by $p = 1_y$ a one-hot encoding of this target label, then the loss is the cross-entropy between $\hat p(x)$ and $p$. Exercise 7: Write a loss function for the softmax model. In this exercise, we will write a function that takes tensors representing the user embeddings $\psi(x)$, movie embeddings $V$, and target label $y$, and returns the cross-entropy loss. Hint: You can use the function [`tf.nn.sparse_softmax_cross_entropy_with_logits`](https://www.tensorflow.org/api_docs/python/tf/nn/sparse_softmax_cross_entropy_with_logits), which takes `logits` as input, where `logits` refers to the product $\psi(x) V^\top$.
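As a plain-numpy illustration of what that loss computes for a single example (independent of the TensorFlow exercise below; all names here are made up for the sketch):
```python
d, num_movies = 4, 6
psi_x = np.random.randn(d)          # user embedding psi(x)
V = np.random.randn(num_movies, d)  # movie embedding matrix
y = 2                               # target movie id

logits = V.dot(psi_x)               # psi(x) V^T, shape [num_movies]
logits = logits - logits.max()      # numerical stability; log-softmax is unchanged
log_probs = logits - np.log(np.sum(np.exp(logits)))
loss = -log_probs[y]                # cross-entropy against the one-hot target 1_y
print(loss)
```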
###Code
def softmax_loss(user_embeddings, movie_embeddings, labels):
"""Returns the cross-entropy loss of the softmax model.
Args:
user_embeddings: A tensor of shape [batch_size, embedding_dim].
movie_embeddings: A tensor of shape [num_movies, embedding_dim].
labels: A sparse tensor of dense_shape [batch_size, 1], such that
labels[i] is the target label for example i.
Returns:
The mean cross-entropy loss.
"""
# ========================= Complete this section ============================
# logits =
# loss =
# ============================================================================
return loss
# @title Solution
def softmax_loss(user_embeddings, movie_embeddings, labels):
"""Returns the cross-entropy loss of the softmax model.
Args:
user_embeddings: A tensor of shape [batch_size, embedding_dim].
movie_embeddings: A tensor of shape [num_movies, embedding_dim].
labels: A tensor of [batch_size], such that labels[i] is the target label
for example i.
Returns:
The mean cross-entropy loss.
"""
# Verify that the embddings have compatible dimensions
user_emb_dim = user_embeddings.shape[1].value
movie_emb_dim = movie_embeddings.shape[1].value
if user_emb_dim != movie_emb_dim:
raise ValueError(
"The user embedding dimension %d should match the movie embedding "
"dimension % d" % (user_emb_dim, movie_emb_dim))
logits = tf.matmul(user_embeddings, movie_embeddings, transpose_b=True)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels))
return loss
###Output
_____no_output_____
###Markdown
Exercise 8: Build a softmax model, train it, and inspect its embeddings.We are now ready to build a softmax CFModel. Complete the `build_softmax_model` function in the next cell. The architecture of the model is defined in the function `create_user_embeddings` and illustrated in the figure below. The input embeddings (movie_id, genre and year) are concatenated to form the input layer, then we have hidden layers with dimensions specified by the `hidden_dims` argument. Finally, the last hidden layer is multiplied by the movie embeddings to obtain the logits layer. For the target label, we will use a randomly-sampled movie_id from the list of movies the user rated.Complete the function below by creating the feature columns and embedding columns, then creating the loss tensors both for the train and test sets (using the `softmax_loss` function of the previous exercise).
###Code
def build_softmax_model(rated_movies, embedding_cols, hidden_dims):
"""Builds a Softmax model for MovieLens.
Args:
rated_movies: DataFrame of traing examples.
embedding_cols: A dictionary mapping feature names (string) to embedding
column objects. This will be used in tf.feature_column.input_layer() to
create the input layer.
hidden_dims: int list of the dimensions of the hidden layers.
Returns:
A CFModel object.
"""
def create_network(features):
"""Maps input features dictionary to user embeddings.
Args:
features: A dictionary of input string tensors.
Returns:
outputs: A tensor of shape [batch_size, embedding_dim].
"""
# Create a bag-of-words embedding for each sparse feature.
inputs = tf.feature_column.input_layer(features, embedding_cols)
# Hidden layers.
input_dim = inputs.shape[1].value
for i, output_dim in enumerate(hidden_dims):
w = tf.get_variable(
"hidden%d_w_" % i, shape=[input_dim, output_dim],
initializer=tf.truncated_normal_initializer(
stddev=1./np.sqrt(output_dim))) / 10.
outputs = tf.matmul(inputs, w)
input_dim = output_dim
inputs = outputs
return outputs
train_rated_movies, test_rated_movies = split_dataframe(rated_movies)
train_batch = make_batch(train_rated_movies, 200)
test_batch = make_batch(test_rated_movies, 100)
with tf.variable_scope("model", reuse=False):
# Train
train_user_embeddings = create_network(train_batch)
train_labels = select_random(train_batch["label"])
with tf.variable_scope("model", reuse=True):
# Test
test_user_embeddings = create_network(test_batch)
test_labels = select_random(test_batch["label"])
movie_embeddings = tf.get_variable(
"input_layer/movie_id_embedding/embedding_weights")
# ========================= Complete this section ============================
# train_loss =
# test_loss =
# test_precision_at_10 =
# ============================================================================
metrics = (
{"train_loss": train_loss, "test_loss": test_loss},
{"test_precision_at_10": test_precision_at_10}
)
embeddings = {"movie_id": movie_embeddings}
return CFModel(embeddings, train_loss, metrics)
# @title Solution
def build_softmax_model(rated_movies, embedding_cols, hidden_dims):
"""Builds a Softmax model for MovieLens.
Args:
rated_movies: DataFrame of traing examples.
embedding_cols: A dictionary mapping feature names (string) to embedding
column objects. This will be used in tf.feature_column.input_layer() to
create the input layer.
hidden_dims: int list of the dimensions of the hidden layers.
Returns:
A CFModel object.
"""
def create_network(features):
"""Maps input features dictionary to user embeddings.
Args:
features: A dictionary of input string tensors.
Returns:
outputs: A tensor of shape [batch_size, embedding_dim].
"""
# Create a bag-of-words embedding for each sparse feature.
inputs = tf.feature_column.input_layer(features, embedding_cols)
# Hidden layers.
input_dim = inputs.shape[1].value
for i, output_dim in enumerate(hidden_dims):
w = tf.get_variable(
"hidden%d_w_" % i, shape=[input_dim, output_dim],
initializer=tf.truncated_normal_initializer(
stddev=1./np.sqrt(output_dim))) / 10.
outputs = tf.matmul(inputs, w)
input_dim = output_dim
inputs = outputs
return outputs
train_rated_movies, test_rated_movies = split_dataframe(rated_movies)
train_batch = make_batch(train_rated_movies, 200)
test_batch = make_batch(test_rated_movies, 100)
with tf.variable_scope("model", reuse=False):
# Train
train_user_embeddings = create_network(train_batch)
train_labels = select_random(train_batch["label"])
with tf.variable_scope("model", reuse=True):
# Test
test_user_embeddings = create_network(test_batch)
test_labels = select_random(test_batch["label"])
movie_embeddings = tf.get_variable(
"input_layer/movie_id_embedding/embedding_weights")
test_loss = softmax_loss(
test_user_embeddings, movie_embeddings, test_labels)
train_loss = softmax_loss(
train_user_embeddings, movie_embeddings, train_labels)
_, test_precision_at_10 = tf.metrics.precision_at_k(
labels=test_labels,
predictions=tf.matmul(test_user_embeddings, movie_embeddings, transpose_b=True),
k=10)
metrics = (
{"train_loss": train_loss, "test_loss": test_loss},
{"test_precision_at_10": test_precision_at_10}
)
embeddings = {"movie_id": movie_embeddings}
return CFModel(embeddings, train_loss, metrics)
###Output
_____no_output_____
###Markdown
Train the Softmax modelWe are now ready to train the softmax model. You can set the following hyperparameters:- learning rate- number of iterations. Note: you can run `softmax_model.train()` again to continue training the model from its current state.- input embedding dimensions (the `input_dims` argument)- number of hidden layers and size of each layer (the `hidden_dims` argument)Note: since our input features are string-valued (movie_id, genre, and year), we need to map them to integer ids. This is done using [`tf.feature_column.categorical_column_with_vocabulary_list`](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_list), which takes a vocabulary list specifying all the values the feature can take. Then each id is mapped to an embedding vector using [`tf.feature_column.embedding_column`](https://www.tensorflow.org/api_docs/python/tf/feature_column/embedding_column).
###Code
# Create feature embedding columns
def make_embedding_col(key, embedding_dim):
categorical_col = tf.feature_column.categorical_column_with_vocabulary_list(
key=key, vocabulary_list=list(set(movies[key].values)), num_oov_buckets=0)
return tf.feature_column.embedding_column(
categorical_column=categorical_col, dimension=embedding_dim,
# default initializer: trancated normal with stddev=1/sqrt(dimension)
combiner='mean')
with tf.Graph().as_default():
softmax_model = build_softmax_model(
rated_movies,
embedding_cols=[
make_embedding_col("movie_id", 35),
make_embedding_col("genre", 3),
make_embedding_col("year", 2),
],
hidden_dims=[35])
softmax_model.train(
learning_rate=8., num_iterations=3000, optimizer=tf.train.AdagradOptimizer)
###Output
_____no_output_____
###Markdown
Inspect the embeddingsWe can inspect the movie embeddings as we did for the previous models. Note that in this case, the movie embeddings are used at the same time as input embeddings (for the bag of words representation of the user history), and as softmax weights.
###Code
movie_neighbors(softmax_model, "Aladdin", DOT)
movie_neighbors(softmax_model, "Aladdin", COSINE)
movie_embedding_norm([reg_model, softmax_model])
tsne_movie_embeddings(softmax_model)
###Output
_____no_output_____
###Markdown
Here we compare the embedding norms for `model`, `model_lowinit`, and `reg_model`. Selecting a subset of the embeddings will highlight them on all three charts simultaneously.
###Code
movie_embedding_norm([model, model_lowinit, reg_model])
# Visualize the embeddings
tsne_movie_embeddings(reg_model)
###Output
_____no_output_____
###Markdown
We should observe that the embeddings have a lot more structure than the unregularized case. Try selecting different genres and observe how they tend to form clusters (for example Horror, Animation and Children). ConclusionThis concludes this section on matrix factorization models. Note that while the scale of the problem is small enough to allow efficient training using SGD, many practical problems need to be trained using more specialized algorithms such as Alternating Least Squares (see [tf.contrib.factorization.WALSMatrixFactorization](https://www.tensorflow.org/api_docs/python/tf/contrib/factorization/WALSMatrixFactorization) for a TF implementation). VI. Softmax modelIn this section, we will train a simple softmax model that predicts whether a given user has rated a movie.**Note**: if you are taking the self-study version of the class, make sure to read through the part of the class covering Softmax training before working on this part.The model will take as input a feature vector $x$ representing the list of movies the user has rated. We start from the ratings DataFrame, which we group by user_id.
###Code
rated_movies = (ratings[["user_id", "movie_id"]]
.groupby("user_id", as_index=False)
.aggregate(lambda x: list(x)))
rated_movies.head()
###Output
_____no_output_____
###Markdown
We then create a function that generates an example batch, such that each example contains the following features:- movie_id: A tensor of strings of the movie ids that the user rated.- genre: A tensor of strings of the genres of those movies- year: A tensor of strings of the release year.
###Code
#@title Batch generation code (run this cell)
years_dict = {
movie: year for movie, year in zip(movies["movie_id"], movies["year"])
}
genres_dict = {
movie: genres.split('-')
for movie, genres in zip(movies["movie_id"], movies["all_genres"])
}
def make_batch(ratings, batch_size):
"""Creates a batch of examples.
Args:
ratings: A DataFrame of ratings such that examples["movie_id"] is a list of
movies rated by a user.
batch_size: The batch size.
"""
def pad(x, fill):
return pd.DataFrame.from_dict(x).fillna(fill).values
movie = []
year = []
genre = []
label = []
for movie_ids in ratings["movie_id"].values:
movie.append(movie_ids)
genre.append([x for movie_id in movie_ids for x in genres_dict[movie_id]])
year.append([years_dict[movie_id] for movie_id in movie_ids])
label.append([int(movie_id) for movie_id in movie_ids])
features = {
"movie_id": pad(movie, ""),
"year": pad(year, ""),
"genre": pad(genre, ""),
"label": pad(label, -1)
}
batch = (
tf.data.Dataset.from_tensor_slices(features)
.shuffle(1000)
.repeat()
.batch(batch_size)
.make_one_shot_iterator()
.get_next())
return batch
def select_random(x):
"""Selectes a random elements from each row of x."""
def to_float(x):
return tf.cast(x, tf.float32)
def to_int(x):
return tf.cast(x, tf.int64)
batch_size = tf.shape(x)[0]
rn = tf.range(batch_size)
nnz = to_float(tf.count_nonzero(x >= 0, axis=1))
rnd = tf.random_uniform([batch_size])
ids = tf.stack([to_int(rn), to_int(nnz * rnd)], axis=1)
return to_int(tf.gather_nd(x, ids))
###Output
_____no_output_____
###Markdown
Loss functionRecall that the softmax model maps the input features $x$ to a user embedding $\psi(x) \in \mathbb R^d$, where $d$ is the embedding dimension. This vector is then multiplied by a movie embedding matrix $V \in \mathbb R^{m \times d}$ (where $m$ is the number of movies), and the final output of the model is the softmax of the product$$\hat p(x) = \text{softmax}(\psi(x) V^\top).$$Given a target label $y$, if we denote by $p = 1_y$ a one-hot encoding of this target label, then the loss is the cross-entropy between $\hat p(x)$ and $p$. Exercise 7: Write a loss function for the softmax model.In this exercise, we will write a function that takes tensors representing the user embeddings $\psi(x)$, movie embeddings $V$, target label $y$, and returns the cross-entropy loss.Hint: You can use the function [`tf.nn.sparse_softmax_cross_entropy_with_logits`](https://www.tensorflow.org/api_docs/python/tf/nn/sparse_softmax_cross_entropy_with_logits), which takes `logits` as input, where `logits` refers to the product $\psi(x) V^\top$.
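As a quick illustrative sketch (not part of the exercise, and using a made-up logits vector), the cross-entropy against a one-hot target $p = 1_y$ reduces to $-\log \hat p(x)_y$, which is what the sparse cross-entropy op computes per example:
```python
import numpy as np

# Hypothetical logits psi(x) V^T for one example over 4 movies, target label y = 2.
logits = np.array([1.0, 2.0, 0.5, -1.0])
y = 2

# Numerically stable softmax.
p_hat = np.exp(logits - logits.max())
p_hat /= p_hat.sum()

# Cross-entropy with the one-hot target collapses to -log(p_hat[y]).
print(-np.log(p_hat[y]))
```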
###Code
def softmax_loss(user_embeddings, movie_embeddings, labels):
"""Returns the cross-entropy loss of the softmax model.
Args:
user_embeddings: A tensor of shape [batch_size, embedding_dim].
movie_embeddings: A tensor of shape [num_movies, embedding_dim].
labels: A sparse tensor of dense_shape [batch_size, 1], such that
labels[i] is the target label for example i.
Returns:
The mean cross-entropy loss.
"""
# ========================= Complete this section ============================
# logits =
# loss =
# ============================================================================
return loss
# @title Solution
def softmax_loss(user_embeddings, movie_embeddings, labels):
"""Returns the cross-entropy loss of the softmax model.
Args:
user_embeddings: A tensor of shape [batch_size, embedding_dim].
movie_embeddings: A tensor of shape [num_movies, embedding_dim].
labels: A tensor of [batch_size], such that labels[i] is the target label
for example i.
Returns:
The mean cross-entropy loss.
"""
  # Verify that the embeddings have compatible dimensions
user_emb_dim = user_embeddings.shape[1].value
movie_emb_dim = movie_embeddings.shape[1].value
if user_emb_dim != movie_emb_dim:
raise ValueError(
"The user embedding dimension %d should match the movie embedding "
"dimension % d" % (user_emb_dim, movie_emb_dim))
logits = tf.matmul(user_embeddings, movie_embeddings, transpose_b=True)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels))
return loss
###Output
_____no_output_____
###Markdown
Exercise 8: Build a softmax model, train it, and inspect its embeddings.We are now ready to build a softmax CFModel. Complete the `build_softmax_model` function in the next cell. The architecture of the model is defined in the function `create_network` below. The input embeddings (movie_id, genre and year) are concatenated to form the input layer, then we have hidden layers with dimensions specified by the `hidden_dims` argument. Finally, the last hidden layer is multiplied by the movie embeddings to obtain the logits layer. For the target label, we will use a randomly-sampled movie_id from the list of movies the user rated.Complete the function below by creating the feature columns and embedding columns, then creating the loss tensors both for the train and test sets (using the `softmax_loss` function of the previous exercise).
###Code
def build_softmax_model(rated_movies, embedding_cols, hidden_dims):
"""Builds a Softmax model for MovieLens.
Args:
    rated_movies: DataFrame of training examples.
embedding_cols: A dictionary mapping feature names (string) to embedding
column objects. This will be used in tf.feature_column.input_layer() to
create the input layer.
hidden_dims: int list of the dimensions of the hidden layers.
Returns:
A CFModel object.
"""
def create_network(features):
"""Maps input features dictionary to user embeddings.
Args:
features: A dictionary of input string tensors.
Returns:
outputs: A tensor of shape [batch_size, embedding_dim].
"""
# Create a bag-of-words embedding for each sparse feature.
inputs = tf.feature_column.input_layer(features, embedding_cols)
# Hidden layers.
input_dim = inputs.shape[1].value
for i, output_dim in enumerate(hidden_dims):
w = tf.get_variable(
"hidden%d_w_" % i, shape=[input_dim, output_dim],
initializer=tf.truncated_normal_initializer(
stddev=1./np.sqrt(output_dim))) / 10.
outputs = tf.matmul(inputs, w)
input_dim = output_dim
inputs = outputs
return outputs
train_rated_movies, test_rated_movies = split_dataframe(rated_movies)
train_batch = make_batch(train_rated_movies, 200)
test_batch = make_batch(test_rated_movies, 100)
with tf.variable_scope("model", reuse=False):
# Train
train_user_embeddings = create_network(train_batch)
train_labels = select_random(train_batch["label"])
with tf.variable_scope("model", reuse=True):
# Test
test_user_embeddings = create_network(test_batch)
test_labels = select_random(test_batch["label"])
movie_embeddings = tf.get_variable(
"input_layer/movie_id_embedding/embedding_weights")
# ========================= Complete this section ============================
# train_loss =
# test_loss =
# test_precision_at_10 =
# ============================================================================
metrics = (
{"train_loss": train_loss, "test_loss": test_loss},
{"test_precision_at_10": test_precision_at_10}
)
embeddings = {"movie_id": movie_embeddings}
return CFModel(embeddings, train_loss, metrics)
# @title Solution
def build_softmax_model(rated_movies, embedding_cols, hidden_dims):
"""Builds a Softmax model for MovieLens.
Args:
    rated_movies: DataFrame of training examples.
embedding_cols: A dictionary mapping feature names (string) to embedding
column objects. This will be used in tf.feature_column.input_layer() to
create the input layer.
hidden_dims: int list of the dimensions of the hidden layers.
Returns:
A CFModel object.
"""
def create_network(features):
"""Maps input features dictionary to user embeddings.
Args:
features: A dictionary of input string tensors.
Returns:
outputs: A tensor of shape [batch_size, embedding_dim].
"""
# Create a bag-of-words embedding for each sparse feature.
inputs = tf.feature_column.input_layer(features, embedding_cols)
# Hidden layers.
input_dim = inputs.shape[1].value
for i, output_dim in enumerate(hidden_dims):
w = tf.get_variable(
"hidden%d_w_" % i, shape=[input_dim, output_dim],
initializer=tf.truncated_normal_initializer(
stddev=1./np.sqrt(output_dim))) / 10.
outputs = tf.matmul(inputs, w)
input_dim = output_dim
inputs = outputs
return outputs
train_rated_movies, test_rated_movies = split_dataframe(rated_movies)
train_batch = make_batch(train_rated_movies, 200)
test_batch = make_batch(test_rated_movies, 100)
with tf.variable_scope("model", reuse=False):
# Train
train_user_embeddings = create_network(train_batch)
train_labels = select_random(train_batch["label"])
with tf.variable_scope("model", reuse=True):
# Test
test_user_embeddings = create_network(test_batch)
test_labels = select_random(test_batch["label"])
movie_embeddings = tf.get_variable(
"input_layer/movie_id_embedding/embedding_weights")
test_loss = softmax_loss(
test_user_embeddings, movie_embeddings, test_labels)
train_loss = softmax_loss(
train_user_embeddings, movie_embeddings, train_labels)
_, test_precision_at_10 = tf.metrics.precision_at_k(
labels=test_labels,
predictions=tf.matmul(test_user_embeddings, movie_embeddings, transpose_b=True),
k=10)
metrics = (
{"train_loss": train_loss, "test_loss": test_loss},
{"test_precision_at_10": test_precision_at_10}
)
embeddings = {"movie_id": movie_embeddings}
return CFModel(embeddings, train_loss, metrics)
###Output
_____no_output_____
###Markdown
Train the Softmax modelWe are now ready to train the softmax model. You can set the following hyperparameters:- learning rate- number of iterations. Note: you can run `softmax_model.train()` again to continue training the model from its current state.- input embedding dimensions (the `embedding_dim` argument of `make_embedding_col`)- number of hidden layers and size of each layer (the `hidden_dims` argument)Note: since our input features are string-valued (movie_id, genre, and year), we need to map them to integer ids. This is done using [`tf.feature_column.categorical_column_with_vocabulary_list`](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_list), which takes a vocabulary list specifying all the values the feature can take. Then each id is mapped to an embedding vector using [`tf.feature_column.embedding_column`](https://www.tensorflow.org/api_docs/python/tf/feature_column/embedding_column).
###Code
# Create feature embedding columns
def make_embedding_col(key, embedding_dim):
categorical_col = tf.feature_column.categorical_column_with_vocabulary_list(
key=key, vocabulary_list=list(set(movies[key].values)), num_oov_buckets=0)
return tf.feature_column.embedding_column(
categorical_column=categorical_col, dimension=embedding_dim,
      # default initializer: truncated normal with stddev=1/sqrt(dimension)
combiner='mean')
with tf.Graph().as_default():
softmax_model = build_softmax_model(
rated_movies,
embedding_cols=[
make_embedding_col("movie_id", 35),
make_embedding_col("genre", 3),
make_embedding_col("year", 2),
],
hidden_dims=[35])
softmax_model.train(
learning_rate=8., num_iterations=3000, optimizer=tf.train.AdagradOptimizer)
###Output
_____no_output_____
###Markdown
Inspect the embeddingsWe can inspect the movie embeddings as we did for the previous models. Note that in this case, the movie embeddings are used at the same time as input embeddings (for the bag of words representation of the user history), and as softmax weights.
###Code
movie_neighbors(softmax_model, "Aladdin", DOT)
movie_neighbors(softmax_model, "Aladdin", COSINE)
movie_embedding_norm([reg_model, softmax_model])
tsne_movie_embeddings(softmax_model)
###Output
_____no_output_____
###Markdown
Copyright 2018 Google LLC.
###Code
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Recommendation Systems with TensorFlowThis Colab notebook complements the course on [Recommendation Systems](https://developers.google.com/machine-learning/recommendation/). Specifically, we'll be using matrix factorization to learn user and movie embeddings. IntroductionWe will create a movie recommendation system based on the [MovieLens](https://movielens.org/) dataset available [here](http://grouplens.org/datasets/movielens/). The data consists of movie ratings (on a scale of 1 to 5). Outline 1. Exploring the MovieLens Data (10 minutes) 1. Preliminaries (25 minutes) 1. Training a matrix factorization model (15 minutes) 1. Inspecting the Embeddings (15 minutes) 1. Regularization in matrix factorization (15 minutes) 1. Softmax model training (30 minutes) SetupLet's get started by importing the required packages.
###Code
# @title Imports (run this cell)
from __future__ import print_function
import numpy as np
import pandas as pd
import collections
from mpl_toolkits.mplot3d import Axes3D
from IPython import display
from matplotlib import pyplot as plt
import sklearn
import sklearn.manifold
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.logging.set_verbosity(tf.logging.ERROR)
# Add some convenience functions to Pandas DataFrame.
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.3f}'.format
def mask(df, key, function):
"""Returns a filtered dataframe, by applying function to key"""
return df[function(df[key])]
def flatten_cols(df):
df.columns = [' '.join(col).strip() for col in df.columns.values]
return df
pd.DataFrame.mask = mask
pd.DataFrame.flatten_cols = flatten_cols
# Install Altair and activate its colab renderer.
print("Installing Altair...")
!pip install git+https://github.com/altair-viz/altair.git
import altair as alt
alt.data_transformers.enable('default', max_rows=None)
alt.renderers.enable('colab')
print("Done installing Altair.")
# Install spreadsheets and import authentication module.
USER_RATINGS = False
!pip install --upgrade -q gspread
from google.colab import auth
import gspread
from oauth2client.client import GoogleCredentials
###Output
_____no_output_____
###Markdown
We then download the MovieLens Data, and create DataFrames containing movies, users, and ratings.
###Code
# @title Load the MovieLens data (run this cell).
# Download MovieLens data.
print("Downloading movielens data...")
from urllib.request import urlretrieve
import zipfile
urlretrieve("http://files.grouplens.org/datasets/movielens/ml-100k.zip", "movielens.zip")
zip_ref = zipfile.ZipFile('movielens.zip', "r")
zip_ref.extractall()
print("Done. Dataset contains:")
print(zip_ref.read('ml-100k/u.info'))
# Load each data set (users, movies, and ratings).
users_cols = ['user_id', 'age', 'sex', 'occupation', 'zip_code']
users = pd.read_csv(
'ml-100k/u.user', sep='|', names=users_cols, encoding='latin-1')
ratings_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp']
ratings = pd.read_csv(
'ml-100k/u.data', sep='\t', names=ratings_cols, encoding='latin-1')
# The movies file contains a binary feature for each genre.
genre_cols = [
"genre_unknown", "Action", "Adventure", "Animation", "Children", "Comedy",
"Crime", "Documentary", "Drama", "Fantasy", "Film-Noir", "Horror",
"Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western"
]
movies_cols = [
'movie_id', 'title', 'release_date', "video_release_date", "imdb_url"
] + genre_cols
movies = pd.read_csv(
'ml-100k/u.item', sep='|', names=movies_cols, encoding='latin-1')
# Since the ids start at 1, we shift them to start at 0.
users["user_id"] = users["user_id"].apply(lambda x: str(x-1))
movies["movie_id"] = movies["movie_id"].apply(lambda x: str(x-1))
movies["year"] = movies['release_date'].apply(lambda x: str(x).split('-')[-1])
ratings["movie_id"] = ratings["movie_id"].apply(lambda x: str(x-1))
ratings["user_id"] = ratings["user_id"].apply(lambda x: str(x-1))
ratings["rating"] = ratings["rating"].apply(lambda x: float(x))
# Compute the number of movies to which a genre is assigned.
genre_occurences = movies[genre_cols].sum().to_dict()
# Since some movies can belong to more than one genre, we create different
# 'genre' columns as follows:
# - all_genres: all the active genres of the movie.
# - genre: randomly sampled from the active genres.
def mark_genres(movies, genres):
def get_random_genre(gs):
active = [genre for genre, g in zip(genres, gs) if g==1]
if len(active) == 0:
return 'Other'
return np.random.choice(active)
def get_all_genres(gs):
active = [genre for genre, g in zip(genres, gs) if g==1]
if len(active) == 0:
return 'Other'
return '-'.join(active)
movies['genre'] = [
get_random_genre(gs) for gs in zip(*[movies[genre] for genre in genres])]
movies['all_genres'] = [
get_all_genres(gs) for gs in zip(*[movies[genre] for genre in genres])]
mark_genres(movies, genre_cols)
# Create one merged DataFrame containing all the movielens data.
movielens = ratings.merge(movies, on='movie_id').merge(users, on='user_id')
# Utility to split the data into training and test sets.
def split_dataframe(df, holdout_fraction=0.1):
"""Splits a DataFrame into training and test sets.
Args:
df: a dataframe.
holdout_fraction: fraction of dataframe rows to use in the test set.
Returns:
train: dataframe for training
test: dataframe for testing
"""
test = df.sample(frac=holdout_fraction, replace=False)
train = df[~df.index.isin(test.index)]
return train, test
###Output
_____no_output_____
###Markdown
I. Exploring the Movielens DataBefore we dive into model building, let's inspect our MovieLens dataset. It is usually helpful to understand the statistics of the dataset. UsersWe start by printing some basic statistics describing the numeric user features.
###Code
users.describe()
###Output
_____no_output_____
###Markdown
We can also print some basic statistics describing the categorical user features
###Code
users.describe(include=['object'])
###Output
_____no_output_____
###Markdown
We can also create histograms to further understand the distribution of the users. We use Altair to create an interactive chart.
###Code
# @title Altair visualization code (run this cell)
# The following functions are used to generate interactive Altair charts.
# We will display histograms of the data, sliced by a given attribute.
# Create filters to be used to slice the data.
occupation_filter = alt.selection_multi(fields=["occupation"])
occupation_chart = alt.Chart().mark_bar().encode(
x="count()",
y=alt.Y("occupation:N"),
color=alt.condition(
occupation_filter,
alt.Color("occupation:N", scale=alt.Scale(scheme='category20')),
alt.value("lightgray")),
).properties(width=300, height=300, selection=occupation_filter)
# A function that generates a histogram of filtered data.
def filtered_hist(field, label, filter):
"""Creates a layered chart of histograms.
The first layer (light gray) contains the histogram of the full data, and the
second contains the histogram of the filtered data.
Args:
field: the field for which to generate the histogram.
label: String label of the histogram.
filter: an alt.Selection object to be used to filter the data.
"""
base = alt.Chart().mark_bar().encode(
x=alt.X(field, bin=alt.Bin(maxbins=10), title=label),
y="count()",
).properties(
width=300,
)
return alt.layer(
base.transform_filter(filter),
base.encode(color=alt.value('lightgray'), opacity=alt.value(.7)),
).resolve_scale(y='independent')
###Output
_____no_output_____
###Markdown
Next, we look at the distribution of ratings per user. Clicking on an occupation in the right chart will filter the data by that occupation. The corresponding histogram is shown in blue, and superimposed with the histogram for the whole data (in light gray). You can use SHIFT+click to select multiple subsets.What do you observe, and how might this affect the recommendations?
###Code
users_ratings = (
ratings
.groupby('user_id', as_index=False)
.agg({'rating': ['count', 'mean']})
.flatten_cols()
.merge(users, on='user_id')
)
# Create a chart for the count, and one for the mean.
alt.hconcat(
filtered_hist('rating count', '# ratings / user', occupation_filter),
filtered_hist('rating mean', 'mean user rating', occupation_filter),
occupation_chart,
data=users_ratings)
###Output
_____no_output_____
###Markdown
MoviesIt is also useful to look at information about the movies and their ratings.
###Code
movies_ratings = movies.merge(
ratings
.groupby('movie_id', as_index=False)
.agg({'rating': ['count', 'mean']})
.flatten_cols(),
on='movie_id')
genre_filter = alt.selection_multi(fields=['genre'])
genre_chart = alt.Chart().mark_bar().encode(
x="count()",
y=alt.Y('genre'),
color=alt.condition(
genre_filter,
alt.Color("genre:N"),
alt.value('lightgray'))
).properties(height=300, selection=genre_filter)
(movies_ratings[['title', 'rating count', 'rating mean']]
.sort_values('rating count', ascending=False)
.head(10))
(movies_ratings[['title', 'rating count', 'rating mean']]
.mask('rating count', lambda x: x > 20)
.sort_values('rating mean', ascending=False)
.head(10))
###Output
_____no_output_____
###Markdown
Finally, the last chart shows the distribution of the number of ratings and average rating.
###Code
# Display the number of ratings and average rating per movie.
alt.hconcat(
filtered_hist('rating count', '# ratings / movie', genre_filter),
filtered_hist('rating mean', 'mean movie rating', genre_filter),
genre_chart,
data=movies_ratings)
###Output
_____no_output_____
###Markdown
II. PreliminariesOur goal is to factorize the ratings matrix $A$ into the product of a user embedding matrix $U$ and movie embedding matrix $V$, such that $A \approx UV^\top$ with$U = \begin{bmatrix} u_{1} \\ \hline \vdots \\ \hline u_{N} \end{bmatrix}$ and$V = \begin{bmatrix} v_{1} \\ \hline \vdots \\ \hline v_{M} \end{bmatrix}$.Here- $N$ is the number of users,- $M$ is the number of movies,- $A_{ij}$ is the rating of the $j$th movie by the $i$th user,- each row $U_i$ is a $d$-dimensional vector (embedding) representing user $i$,- each row $V_j$ is a $d$-dimensional vector (embedding) representing movie $j$,- the prediction of the model for the $(i, j)$ pair is the dot product $\langle U_i, V_j \rangle$. Sparse Representation of the Rating MatrixThe rating matrix could be very large and, in general, most of the entries are unobserved, since a given user will only rate a small subset of movies. For efficient representation, we will use a [tf.SparseTensor](https://www.tensorflow.org/api_docs/python/tf/SparseTensor). A `SparseTensor` uses three tensors to represent the matrix: `tf.SparseTensor(indices, values, dense_shape)` represents a tensor, where a value $A_{ij} = a$ is encoded by setting `indices[k] = [i, j]` and `values[k] = a`. The last tensor `dense_shape` is used to specify the shape of the full underlying matrix. Toy exampleAssume we have $2$ users and $4$ movies. Our toy ratings dataframe has three ratings:

user\_id | movie\_id | rating
--:|--:|--:
0 | 0 | 5.0
0 | 1 | 3.0
1 | 3 | 1.0

The corresponding rating matrix is$$A =\begin{bmatrix}5.0 & 3.0 & 0 & 0 \\0 & 0 & 0 & 1.0\end{bmatrix}$$And the SparseTensor representation is:
```python
SparseTensor(
    indices=[[0, 0], [0, 1], [1, 3]],
    values=[5.0, 3.0, 1.0],
    dense_shape=[2, 4])
```
Exercise 1: Build a tf.SparseTensor representation of the Rating Matrix.In this exercise, we'll write a function that maps from our `ratings` DataFrame to a `tf.SparseTensor`.Hint: you can select the values of a given column of a Dataframe `df` using `df['column_name'].values`.
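Before starting the exercise, here is a minimal sketch (illustrative only) that builds the toy `SparseTensor` above and densifies it with `tf.sparse.to_dense` to confirm that unobserved entries come back as zeros:
```python
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

toy = tf.SparseTensor(
    indices=[[0, 0], [0, 1], [1, 3]],
    values=[5.0, 3.0, 1.0],
    dense_shape=[2, 4])

# Densify to check the layout of the toy matrix.
with tf.Session() as sess:
    print(sess.run(tf.sparse.to_dense(toy)))
# [[5. 3. 0. 0.]
#  [0. 0. 0. 1.]]
```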
###Code
def build_rating_sparse_tensor(ratings_df):
"""
Args:
ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns.
Returns:
A tf.SparseTensor representing the ratings matrix.
"""
# ========================= Complete this section ============================
# indices =
# values =
# ============================================================================
return tf.SparseTensor(
indices=indices,
values=values,
dense_shape=[users.shape[0], movies.shape[0]])
#@title Solution
def build_rating_sparse_tensor(ratings_df):
"""
Args:
ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns.
Returns:
a tf.SparseTensor representing the ratings matrix.
"""
indices = ratings_df[['user_id', 'movie_id']].values
values = ratings_df['rating'].values
return tf.SparseTensor(
indices=indices,
values=values,
dense_shape=[users.shape[0], movies.shape[0]])
###Output
_____no_output_____
###Markdown
Calculating the errorThe model approximates the ratings matrix $A$ by a low-rank product $UV^\top$. We need a way to measure the approximation error. We'll start by using the Mean Squared Error of observed entries only (we will revisit this later). It is defined as$$\begin{align*}\text{MSE}(A, UV^\top)&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - (UV^\top)_{ij})^2} \\&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - \langle U_i, V_j\rangle)^2}\end{align*}$$where $\Omega$ is the set of observed ratings, and $|\Omega|$ is the cardinality of $\Omega$. Exercise 2: Mean Squared ErrorWrite a TensorFlow function that takes a sparse rating matrix $A$ and the two embedding matrices $U, V$ and returns the mean squared error $\text{MSE}(A, UV^\top)$.Hints: * in this section, we only consider observed entries when calculating the loss. * a `SparseTensor` `sp_x` is a tuple of three Tensors: `sp_x.indices`, `sp_x.values` and `sp_x.dense_shape`. * you may find [`tf.gather_nd`](https://www.tensorflow.org/api_docs/python/tf/gather_nd) and [`tf.losses.mean_squared_error`](https://www.tensorflow.org/api_docs/python/tf/losses/mean_squared_error) helpful.
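For example, on the toy matrix from the previous section, $\Omega = \{(0,0), (0,1), (1,3)\}$; if the model (hypothetically) predicted $2.0$ for every observed pair, the loss would be $\frac{(5-2)^2 + (3-2)^2 + (1-2)^2}{3} = \frac{11}{3} \approx 3.67$, and the unobserved zero entries of $A$ never enter the sum.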
###Code
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
# ========================= Complete this section ============================
# loss =
# ============================================================================
return loss
#@title Solution
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
predictions = tf.gather_nd(
tf.matmul(user_embeddings, movie_embeddings, transpose_b=True),
sparse_ratings.indices)
loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions)
return loss
###Output
_____no_output_____
###Markdown
Note: One approach is to compute the full prediction matrix $UV^\top$, then gather the entries corresponding to the observed pairs. The memory cost of this approach is $O(NM)$. For the MovieLens dataset, this is fine, as the dense $N \times M$ matrix is small enough to fit in memory ($N = 943$, $M = 1682$).Another approach (given in the alternate solution below) is to only gather the embeddings of the observed pairs, then compute their dot products. The memory cost is $O(|\Omega| d)$ where $d$ is the embedding dimension. In our case, $|\Omega| = 10^5$, and the embedding dimension is on the order of $10$, so the memory cost of both methods is comparable. But when the number of users or movies is much larger, the first approach becomes infeasible.
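To make the comparison concrete: the dense approach materializes $N \times M = 943 \times 1682 \approx 1.6 \times 10^6$ predictions, while the gather-based approach touches about $|\Omega| \cdot d = 10^5 \times 10 = 10^6$ embedding entries, so the two are comparable here. With, say, $10^6$ users and $10^5$ movies, the dense product would require $10^{11}$ entries, and only the second approach remains practical.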
###Code
#@title Alternate Solution
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
predictions = tf.reduce_sum(
tf.gather(user_embeddings, sparse_ratings.indices[:, 0]) *
tf.gather(movie_embeddings, sparse_ratings.indices[:, 1]),
axis=1)
loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions)
return loss
###Output
_____no_output_____
###Markdown
Exercise 3 (Optional): adding your own ratings to the data set You have the option to add your own ratings to the data set. If you choose to do so, you will be able to see recommendations for yourself.Start by checking the box below. Running the next cell will authenticate you to your Google Drive account, and create a spreadsheet that contains all movie titles in column 'A'. Follow the link to the spreadsheet and take 3 minutes to rate some of the movies. Your ratings should be entered in column 'B'.
###Code
USER_RATINGS = True #@param {type:"boolean"}
# @title Run to create a spreadsheet, then use it to enter your ratings.
# Authenticate user.
if USER_RATINGS:
auth.authenticate_user()
gc = gspread.authorize(GoogleCredentials.get_application_default())
# Create the spreadsheet and print a link to it.
try:
sh = gc.open('MovieLens-test')
except(gspread.SpreadsheetNotFound):
sh = gc.create('MovieLens-test')
worksheet = sh.sheet1
titles = movies['title'].values
cell_list = worksheet.range(1, 1, len(titles), 1)
for cell, title in zip(cell_list, titles):
cell.value = title
worksheet.update_cells(cell_list)
print("Link to the spreadsheet: "
"https://docs.google.com/spreadsheets/d/{}/edit".format(sh.id))
###Output
_____no_output_____
###Markdown
Run the next cell to load your ratings and add them to the main `ratings` DataFrame.
###Code
# @title Run to load your ratings.
# Load the ratings from the spreadsheet and create a DataFrame.
if USER_RATINGS:
my_ratings = pd.DataFrame.from_records(worksheet.get_all_values()).reset_index()
my_ratings = my_ratings[my_ratings[1] != '']
my_ratings = pd.DataFrame({
'user_id': "943",
'movie_id': list(map(str, my_ratings['index'])),
'rating': list(map(float, my_ratings[1])),
})
# Remove previous ratings.
ratings = ratings[ratings.user_id != "943"]
# Add new ratings.
ratings = ratings.append(my_ratings, ignore_index=True)
# Add new user to the users DataFrame.
if users.shape[0] == 943:
users = users.append(users.iloc[942], ignore_index=True)
users["user_id"][943] = "943"
print("Added your %d ratings; you have great taste!" % len(my_ratings))
ratings[ratings.user_id=="943"].merge(movies[['movie_id', 'title']])
###Output
_____no_output_____
###Markdown
III. Training a Matrix Factorization model CFModel (Collaborative Filtering Model) helper classThis is a simple class to train a matrix factorization model using stochastic gradient descent.The class constructor takes- a dictionary of embedding variables, e.g. the user embeddings U and the movie embeddings V (each a `tf.Variable`).- a loss to optimize (a `tf.Tensor`).- an optional list of metrics dictionaries, each mapping a string (the name of the metric) to a tensor. These are evaluated and plotted during training (e.g. training error and test error).After training, one can access the trained embeddings using the `model.embeddings` dictionary.Example usage:
```
U_var = ...
V_var = ...
loss = ...
model = CFModel({'user_id': U_var, 'movie_id': V_var}, loss)
model.train(num_iterations=100, learning_rate=1.0)
user_embeddings = model.embeddings['user_id']
movie_embeddings = model.embeddings['movie_id']
```
###Code
# @title CFModel helper class (run this cell)
class CFModel(object):
"""Simple class that represents a collaborative filtering model"""
def __init__(self, embedding_vars, loss, metrics=None):
"""Initializes a CFModel.
Args:
embedding_vars: A dictionary of tf.Variables.
loss: A float Tensor. The loss to optimize.
metrics: optional list of dictionaries of Tensors. The metrics in each
dictionary will be plotted in a separate figure during training.
"""
self._embedding_vars = embedding_vars
self._loss = loss
self._metrics = metrics
self._embeddings = {k: None for k in embedding_vars}
self._session = None
@property
def embeddings(self):
"""The embeddings dictionary."""
return self._embeddings
def train(self, num_iterations=100, learning_rate=1.0, plot_results=True,
optimizer=tf.train.GradientDescentOptimizer):
"""Trains the model.
Args:
      num_iterations: number of iterations to run.
learning_rate: optimizer learning rate.
plot_results: whether to plot the results at the end of training.
optimizer: the optimizer to use. Default to GradientDescentOptimizer.
Returns:
The metrics dictionary evaluated at the last iteration.
"""
with self._loss.graph.as_default():
opt = optimizer(learning_rate)
train_op = opt.minimize(self._loss)
local_init_op = tf.group(
tf.variables_initializer(opt.variables()),
tf.local_variables_initializer())
if self._session is None:
self._session = tf.Session()
with self._session.as_default():
self._session.run(tf.global_variables_initializer())
self._session.run(tf.tables_initializer())
tf.train.start_queue_runners()
with self._session.as_default():
local_init_op.run()
iterations = []
metrics = self._metrics or ({},)
      metrics_vals = [collections.defaultdict(list) for _ in metrics]
# Train and append results.
for i in range(num_iterations + 1):
_, results = self._session.run((train_op, metrics))
if (i % 10 == 0) or i == num_iterations:
print("\r iteration %d: " % i + ", ".join(
["%s=%f" % (k, v) for r in results for k, v in r.items()]),
end='')
iterations.append(i)
for metric_val, result in zip(metrics_vals, results):
for k, v in result.items():
metric_val[k].append(v)
for k, v in self._embedding_vars.items():
self._embeddings[k] = v.eval()
if plot_results:
# Plot the metrics.
num_subplots = len(metrics)+1
fig = plt.figure()
fig.set_size_inches(num_subplots*10, 8)
for i, metric_vals in enumerate(metrics_vals):
ax = fig.add_subplot(1, num_subplots, i+1)
for k, v in metric_vals.items():
ax.plot(iterations, v, label=k)
ax.set_xlim([1, num_iterations])
ax.legend()
return results
###Output
_____no_output_____
###Markdown
Exercise 4: Build a Matrix Factorization model and train itUsing your `sparse_mean_square_error` function, write a function that builds a `CFModel` by creating the embedding variables and the train and test losses.
###Code
def build_model(ratings, embedding_dim=3, init_stddev=1.):
"""
Args:
ratings: a DataFrame of the ratings
embedding_dim: the dimension of the embedding vectors.
init_stddev: float, the standard deviation of the random initial embeddings.
Returns:
model: a CFModel.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
# ========================= Complete this section ============================
# A_train =
# A_test =
# ============================================================================
# Initialize the embeddings using a normal distribution.
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
# ========================= Complete this section ============================
# train_loss =
# test_loss =
# ============================================================================
metrics = {
'train_error': train_loss,
'test_error': test_loss
}
embeddings = {
"user_id": U,
"movie_id": V
}
return CFModel(embeddings, train_loss, [metrics])
#@title Solution
def build_model(ratings, embedding_dim=3, init_stddev=1.):
"""
Args:
ratings: a DataFrame of the ratings
embedding_dim: the dimension of the embedding vectors.
init_stddev: float, the standard deviation of the random initial embeddings.
Returns:
model: a CFModel.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
A_train = build_rating_sparse_tensor(train_ratings)
A_test = build_rating_sparse_tensor(test_ratings)
# Initialize the embeddings using a normal distribution.
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
train_loss = sparse_mean_square_error(A_train, U, V)
test_loss = sparse_mean_square_error(A_test, U, V)
metrics = {
'train_error': train_loss,
'test_error': test_loss
}
embeddings = {
"user_id": U,
"movie_id": V
}
return CFModel(embeddings, train_loss, [metrics])
###Output
_____no_output_____
###Markdown
Great, now it's time to train the model!Go ahead and run the next cell, trying different parameters (embedding dimension, learning rate, iterations). The training and test errors are plotted at the end of training. You can inspect these values to validate the hyper-parameters.Note: by calling `model.train` again, the model will continue training starting from the current values of the embeddings.
###Code
# Build the CF model and train it.
model = build_model(ratings, embedding_dim=30, init_stddev=0.5)
model.train(num_iterations=1000, learning_rate=10.)
###Output
_____no_output_____
###Markdown
The movie and user embeddings are also displayed in the right figure. When the embedding dimension is greater than 3, the embeddings are projected on the first 3 dimensions. The next section will have a more detailed look at the embeddings. IV. Inspecting the EmbeddingsIn this section, we take a closer look at the learned embeddings, by- computing your recommendations- looking at the nearest neighbors of some movies,- looking at the norms of the movie embeddings,- visualizing the embedding in a projected embedding space. Exercise 5: Write a function that computes the scores of the candidatesWe start by writing a function that, given a query embedding $u \in \mathbb R^d$ and item embeddings $V \in \mathbb R^{N \times d}$, computes the item scores.As discussed in the lecture, there are different similarity measures we can use, and these can yield different results. We will compare the following:- dot product: the score of item j is $\langle u, V_j \rangle$.- cosine: the score of item j is $\frac{\langle u, V_j \rangle}{\|u\|\|V_j\|}$.Hints:- you can use [`np.dot`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html) to compute the product of two np.Arrays.- you can use [`np.linalg.norm`](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.norm.html) to compute the norm of a np.Array.
###Code
DOT = 'dot'
COSINE = 'cosine'
def compute_scores(query_embedding, item_embeddings, measure=DOT):
"""Computes the scores of the candidates given a query.
Args:
query_embedding: a vector of shape [k], representing the query embedding.
item_embeddings: a matrix of shape [N, k], such that row i is the embedding
of item i.
measure: a string specifying the similarity measure to be used. Can be
either DOT or COSINE.
Returns:
scores: a vector of shape [N], such that scores[i] is the score of item i.
"""
# ========================= Complete this section ============================
# scores =
# ============================================================================
return scores
#@title Solution
DOT = 'dot'
COSINE = 'cosine'
def compute_scores(query_embedding, item_embeddings, measure=DOT):
"""Computes the scores of the candidates given a query.
Args:
query_embedding: a vector of shape [k], representing the query embedding.
item_embeddings: a matrix of shape [N, k], such that row i is the embedding
of item i.
measure: a string specifying the similarity measure to be used. Can be
either DOT or COSINE.
Returns:
scores: a vector of shape [N], such that scores[i] is the score of item i.
"""
u = query_embedding
V = item_embeddings
if measure == COSINE:
V = V / np.linalg.norm(V, axis=1, keepdims=True)
u = u / np.linalg.norm(u)
scores = u.dot(V.T)
return scores
###Output
_____no_output_____
###Markdown
Equipped with this function, we can compute recommendations, where the query embedding can be either a user embedding or a movie embedding.
###Code
# @title User recommendations and nearest neighbors (run this cell)
def user_recommendations(model, measure=DOT, exclude_rated=False, k=6):
if USER_RATINGS:
scores = compute_scores(
model.embeddings["user_id"][943], model.embeddings["movie_id"], measure)
score_key = measure + ' score'
df = pd.DataFrame({
score_key: list(scores),
'movie_id': movies['movie_id'],
'titles': movies['title'],
'genres': movies['all_genres'],
})
if exclude_rated:
# remove movies that are already rated
rated_movies = ratings[ratings.user_id == "943"]["movie_id"].values
df = df[df.movie_id.apply(lambda movie_id: movie_id not in rated_movies)]
display.display(df.sort_values([score_key], ascending=False).head(k))
def movie_neighbors(model, title_substring, measure=DOT, k=6):
# Search for movie ids that match the given substring.
ids = movies[movies['title'].str.contains(title_substring)].index.values
titles = movies.iloc[ids]['title'].values
if len(titles) == 0:
raise ValueError("Found no movies with title %s" % title_substring)
print("Nearest neighbors of : %s." % titles[0])
if len(titles) > 1:
print("[Found more than one matching movie. Other candidates: {}]".format(
", ".join(titles[1:])))
movie_id = ids[0]
scores = compute_scores(
model.embeddings["movie_id"][movie_id], model.embeddings["movie_id"],
measure)
score_key = measure + ' score'
df = pd.DataFrame({
score_key: list(scores),
'titles': movies['title'],
'genres': movies['all_genres']
})
display.display(df.sort_values([score_key], ascending=False).head(k))
###Output
_____no_output_____
###Markdown
Your recommendationsIf you chose to enter your own ratings above, you can run the next cell to generate recommendations for you.
###Code
user_recommendations(model, measure=COSINE, k=5)
###Output
_____no_output_____
###Markdown
How do the recommendations look? Movie Nearest neighborsLet's look at the nearest neighbors for some of the movies.
###Code
movie_neighbors(model, "Aladdin", DOT)
movie_neighbors(model, "Aladdin", COSINE)
###Output
_____no_output_____
###Markdown
It seems that the quality of learned embeddings may not be very good. This will be addressed in Section V by adding several regularization techniques. First, we will further inspect the embeddings. Movie Embedding NormWe can also observe that the recommendations with dot-product and cosine are different: with dot-product, the model tends to recommend popular movies. This can be explained by the fact that in matrix factorization models, the norm of the embedding is often correlated with popularity (popular movies have a larger norm), which makes it more likely to recommend more popular items. We can confirm this hypothesis by sorting the movies by their embedding norm, as done in the next cell.
###Code
# @title Embedding Visualization code (run this cell)
def movie_embedding_norm(models):
"""Visualizes the norm and number of ratings of the movie embeddings.
Args:
model: A MFModel object.
"""
if not isinstance(models, list):
models = [models]
df = pd.DataFrame({
'title': movies['title'],
'genre': movies['genre'],
'num_ratings': movies_ratings['rating count'],
})
charts = []
brush = alt.selection_interval()
for i, model in enumerate(models):
norm_key = 'norm'+str(i)
df[norm_key] = np.linalg.norm(model.embeddings["movie_id"], axis=1)
nearest = alt.selection(
type='single', encodings=['x', 'y'], on='mouseover', nearest=True,
empty='none')
base = alt.Chart().mark_circle().encode(
x='num_ratings',
y=norm_key,
color=alt.condition(brush, alt.value('#4c78a8'), alt.value('lightgray'))
).properties(
selection=nearest).add_selection(brush)
text = alt.Chart().mark_text(align='center', dx=5, dy=-5).encode(
x='num_ratings', y=norm_key,
text=alt.condition(nearest, 'title', alt.value('')))
charts.append(alt.layer(base, text))
return alt.hconcat(*charts, data=df)
def visualize_movie_embeddings(data, x, y):
nearest = alt.selection(
type='single', encodings=['x', 'y'], on='mouseover', nearest=True,
empty='none')
base = alt.Chart().mark_circle().encode(
x=x,
y=y,
color=alt.condition(genre_filter, "genre", alt.value("whitesmoke")),
).properties(
width=600,
height=600,
selection=nearest)
text = alt.Chart().mark_text(align='left', dx=5, dy=-5).encode(
x=x,
y=y,
text=alt.condition(nearest, 'title', alt.value('')))
return alt.hconcat(alt.layer(base, text), genre_chart, data=data)
def tsne_movie_embeddings(model):
"""Visualizes the movie embeddings, projected using t-SNE with Cosine measure.
Args:
model: A MFModel object.
"""
tsne = sklearn.manifold.TSNE(
n_components=2, perplexity=40, metric='cosine', early_exaggeration=10.0,
init='pca', verbose=True, n_iter=400)
print('Running t-SNE...')
V_proj = tsne.fit_transform(model.embeddings["movie_id"])
movies.loc[:,'x'] = V_proj[:, 0]
movies.loc[:,'y'] = V_proj[:, 1]
return visualize_movie_embeddings(movies, 'x', 'y')
movie_embedding_norm(model)
###Output
_____no_output_____
###Markdown
Note: Depending on how the model is initialized, you may observe that some niche movies (ones with few ratings) have a high norm, leading to spurious recommendations. This can happen if the embedding of that movie happens to be initialized with a high norm. Then, because the movie has few ratings, it is infrequently updated, and can keep its high norm. This will be alleviated by using regularization.Try changing the value of the hyper-parameter `init_stddev`. One quantity that can be helpful is that the expected norm of a $d$-dimensional vector with entries $\sim \mathcal N(0, \sigma^2)$ is approximately $\sigma \sqrt d$.How does this affect the embedding norm distribution, and the ranking of the top-norm movies?
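As a quick sanity check of the $\sigma \sqrt d$ rule of thumb (illustrative only): with $d = 30$, `init_stddev=0.5` gives an expected initial norm of roughly $0.5\sqrt{30} \approx 2.7$, versus roughly $0.27$ for `init_stddev=0.05`. This is easy to verify empirically:
```python
import numpy as np

d = 30
for sigma in (0.5, 0.05):
    # Average norm of many d-dimensional vectors with i.i.d. N(0, sigma^2) entries.
    samples = np.random.normal(0.0, sigma, size=(100000, d))
    print(sigma, np.linalg.norm(samples, axis=1).mean(), sigma * np.sqrt(d))
```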
###Code
#@title Solution
model_lowinit = build_model(ratings, embedding_dim=30, init_stddev=0.05)
model_lowinit.train(num_iterations=1000, learning_rate=10.)
movie_neighbors(model_lowinit, "Aladdin", DOT)
movie_neighbors(model_lowinit, "Aladdin", COSINE)
movie_embedding_norm([model, model_lowinit])
###Output
_____no_output_____
###Markdown
Embedding visualizationSince it is hard to visualize embeddings in a higher-dimensional space (when the embedding dimension $k > 3$), one approach is to project the embeddings to a lower-dimensional space. t-SNE (t-distributed Stochastic Neighbor Embedding) is an algorithm that projects the embeddings while attempting to preserve their pairwise distances. It can be useful for visualization, but one should use it with care. For more information on using t-SNE, see [How to Use t-SNE Effectively](https://distill.pub/2016/misread-tsne/).
###Code
tsne_movie_embeddings(model_lowinit)
###Output
_____no_output_____
###Markdown
You can highlight the embeddings of a given genre by clicking on the genres panel (SHIFT+click to select multiple genres).We can observe that the embeddings do not seem to have any notable structure, and the embeddings of a given genre are located all over the embedding space. This confirms the poor quality of the learned embeddings. One of the main reasons, which we will address in the next section, is that we only trained the model on observed pairs, and without regularization. V. Regularization In Matrix FactorizationIn the previous section, our loss was defined as the mean squared error on the observed part of the rating matrix. As discussed in the lecture, this can be problematic as the model does not learn how to place the embeddings of irrelevant movies. This phenomenon is known as *folding*.We will add regularization terms that will address this issue. We will use two types of regularization:- Regularization of the model parameters. This is a common $\ell_2$ regularization term on the embedding matrices, given by $r(U, V) = \frac{1}{N} \sum_i \|U_i\|^2 + \frac{1}{M}\sum_j \|V_j\|^2$.- A global prior that pushes the prediction of any pair towards zero, called the *gravity* term. This is given by $g(U, V) = \frac{1}{MN} \sum_{i = 1}^N \sum_{j = 1}^M \langle U_i, V_j \rangle^2$.The total loss is then given by$$\frac{1}{|\Omega|}\sum_{(i, j) \in \Omega} (A_{ij} - \langle U_i, V_j\rangle)^2 + \lambda _r r(U, V) + \lambda_g g(U, V)$$where $\lambda_r$ and $\lambda_g$ are two regularization coefficients (hyper-parameters). Exercise 6: Build a regularized Matrix Factorization model and train itWrite a function that builds a regularized model. You are given a function `gravity(U, V)` that computes the gravity term given the two embedding matrices $U$ and $V$.
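Before completing the exercise, it may help to see why the provided `gravity` implementation in the next cell avoids forming the full $N \times M$ prediction matrix: it relies on the identity $\sum_{i,j} \langle U_i, V_j\rangle^2 = \sum_{k,l} (U^\top U)_{kl} (V^\top V)_{kl}$. A small NumPy check of that identity (an added sketch, not part of the original exercise):
```python
import numpy as np

# Check: sum_{i,j} <U_i, V_j>^2 equals the sum of the elementwise product of the
# two d x d Gram matrices U^T U and V^T V, so the gravity term needs only O(d^2) memory.
N, M, d = 50, 80, 3
U = np.random.randn(N, d)
V = np.random.randn(M, d)
naive = (U.dot(V.T) ** 2).sum() / (N * M)          # materializes the N x M matrix
gram = (U.T.dot(U) * V.T.dot(V)).sum() / (N * M)   # the trick used by gravity()
print(naive, gram)  # equal up to floating-point error
```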
###Code
def gravity(U, V):
"""Creates a gravity loss given two embedding matrices."""
return 1. / (U.shape[0].value*V.shape[0].value) * tf.reduce_sum(
tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True))
def build_regularized_model(
ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1.,
init_stddev=0.1):
"""
Args:
ratings: the DataFrame of movie ratings.
embedding_dim: The dimension of the embedding space.
regularization_coeff: The regularization coefficient lambda.
gravity_coeff: The gravity regularization coefficient lambda_g.
Returns:
A CFModel object that uses a regularized loss.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
A_train = build_rating_sparse_tensor(train_ratings)
A_test = build_rating_sparse_tensor(test_ratings)
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
# ========================= Complete this section ============================
# error_train =
# error_test =
# gravity_loss =
# regularization_loss =
# ============================================================================
total_loss = error_train + regularization_loss + gravity_loss
losses = {
'train_error': error_train,
'test_error': error_test,
}
loss_components = {
'observed_loss': error_train,
'regularization_loss': regularization_loss,
'gravity_loss': gravity_loss,
}
embeddings = {"user_id": U, "movie_id": V}
return CFModel(embeddings, total_loss, [losses, loss_components])
# @title Solution
def gravity(U, V):
"""Creates a gravity loss given two embedding matrices."""
return 1. / (U.shape[0].value*V.shape[0].value) * tf.reduce_sum(
tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True))
def build_regularized_model(
ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1.,
init_stddev=0.1):
"""
Args:
ratings: the DataFrame of movie ratings.
embedding_dim: The dimension of the embedding space.
regularization_coeff: The regularization coefficient lambda.
gravity_coeff: The gravity regularization coefficient lambda_g.
Returns:
A CFModel object that uses a regularized loss.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
A_train = build_rating_sparse_tensor(train_ratings)
A_test = build_rating_sparse_tensor(test_ratings)
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
error_train = sparse_mean_square_error(A_train, U, V)
error_test = sparse_mean_square_error(A_test, U, V)
gravity_loss = gravity_coeff * gravity(U, V)
regularization_loss = regularization_coeff * (
tf.reduce_sum(U*U)/U.shape[0].value + tf.reduce_sum(V*V)/V.shape[0].value)
total_loss = error_train + regularization_loss + gravity_loss
losses = {
'train_error_observed': error_train,
'test_error_observed': error_test,
}
loss_components = {
'observed_loss': error_train,
'regularization_loss': regularization_loss,
'gravity_loss': gravity_loss,
}
embeddings = {"user_id": U, "movie_id": V}
return CFModel(embeddings, total_loss, [losses, loss_components])
###Output
_____no_output_____
###Markdown
It is now time to train the regularized model! You can try different values of the regularization coefficients, and different embedding dimensions.
###Code
reg_model = build_regularized_model(
ratings, regularization_coeff=0.1, gravity_coeff=1.0, embedding_dim=35,
init_stddev=.05)
reg_model.train(num_iterations=2000, learning_rate=20.)
###Output
_____no_output_____
###Markdown
Observe that adding the regularization terms results in a higher MSE, both on the training and test set. However, as we will see, the quality of the recommendations improves. This highlights a tension between fitting the observed data and minimizing the regularization terms. Fitting the observed data often emphasizes learning high similarity (between items with many interactions), but a good embedding representation also requires learning low similarity (between items with few or no interactions). Inspect the resultsLet's see if the results with regularization look better.
###Code
user_recommendations(reg_model, DOT, exclude_rated=True, k=10)
###Output
_____no_output_____
###Markdown
Hopefully, these recommendations look better. You can change the similarity measure from COSINE to DOT and observe how this affects the recommendations.Since the model is likely to recommend items that you rated highly, you have the option to exclude the items you rated, using `exclude_rated=True`.In the following cells, we display the nearest neighbors, the embedding norms, and the t-SNE projection of the movie embeddings.
###Code
movie_neighbors(reg_model, "Aladdin", DOT)
movie_neighbors(reg_model, "Aladdin", COSINE)
###Output
_____no_output_____
###Markdown
Here we compare the embedding norms for `model`, `model_lowinit`, and `reg_model`. Selecting a subset of the embeddings will highlight them on all three charts simultaneously.
###Code
movie_embedding_norm([model, model_lowinit, reg_model])
# Visualize the embeddings
tsne_movie_embeddings(reg_model)
###Output
_____no_output_____
###Markdown
We should observe that the embeddings have a lot more structure than the unregularized case. Try selecting different genres and observe how they tend to form clusters (for example Horror, Animation and Children). ConclusionThis concludes this section on matrix factorization models. Note that while the scale of the problem is small enough to allow efficient training using SGD, many practical problems need to be trained using more specialized algorithms such as Alternating Least Squares (see [tf.contrib.factorization.WALSMatrixFactorization](https://www.tensorflow.org/api_docs/python/tf/contrib/factorization/WALSMatrixFactorization) for a TF implementation). VI. Softmax modelIn this section, we will train a simple softmax model that predicts whether a given user has rated a movie.**Note**: if you are taking the self-study version of the class, make sure to read through the part of the class covering Softmax training before working on this part.The model will take as input a feature vector $x$ representing the list of movies the user has rated. We start from the ratings DataFrame, which we group by user_id.
###Code
rated_movies = (ratings[["user_id", "movie_id"]]
.groupby("user_id", as_index=False)
.aggregate(lambda x: list(x)))
rated_movies.head()
###Output
_____no_output_____
###Markdown
We then create a function that generates an example batch, such that each example contains the following features:- movie_id: A tensor of strings of the movie ids that the user rated.- genre: A tensor of strings of the genres of those movies.- year: A tensor of strings of the release years of those movies.
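Because different users rated different numbers of movies, the per-user lists are ragged; the `pad` helper in the next cell right-pads them with a fill value so they can be stacked into a dense batch. A tiny illustration of the same idea (an added sketch, not part of the original notebook):
```python
import pandas as pd

# Two users: the first rated two movies, the second rated one.
ragged = [['10', '42'], ['7']]
padded = pd.DataFrame(ragged).fillna('').values
print(padded)
# [['10' '42']
#  ['7' '']]
```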
###Code
#@title Batch generation code (run this cell)
years_dict = {
movie: year for movie, year in zip(movies["movie_id"], movies["year"])
}
genres_dict = {
movie: genres.split('-')
for movie, genres in zip(movies["movie_id"], movies["all_genres"])
}
def make_batch(ratings, batch_size):
"""Creates a batch of examples.
Args:
ratings: A DataFrame of ratings such that examples["movie_id"] is a list of
movies rated by a user.
batch_size: The batch size.
"""
def pad(x, fill):
return pd.DataFrame.from_dict(x).fillna(fill).values
movie = []
year = []
genre = []
label = []
for movie_ids in ratings["movie_id"].values:
movie.append(movie_ids)
genre.append([x for movie_id in movie_ids for x in genres_dict[movie_id]])
year.append([years_dict[movie_id] for movie_id in movie_ids])
label.append([int(movie_id) for movie_id in movie_ids])
features = {
"movie_id": pad(movie, ""),
"year": pad(year, ""),
"genre": pad(genre, ""),
"label": pad(label, -1)
}
batch = (
tf.data.Dataset.from_tensor_slices(features)
.shuffle(1000)
.repeat()
.batch(batch_size)
.make_one_shot_iterator()
.get_next())
return batch
def select_random(x):
  """Selects a random element from each row of x."""
  def to_float(x):
    return tf.cast(x, tf.float32)
  def to_int(x):
    return tf.cast(x, tf.int64)
  batch_size = tf.shape(x)[0]
  rn = tf.range(batch_size)
  # Number of valid (non-padding) entries per row; padded labels are filled with -1.
  nnz = to_float(tf.count_nonzero(x >= 0, axis=1))
  rnd = tf.random_uniform([batch_size])
  # For each row i, pick a random column index in [0, nnz[i]) and gather that entry.
  ids = tf.stack([to_int(rn), to_int(nnz * rnd)], axis=1)
  return to_int(tf.gather_nd(x, ids))
###Output
_____no_output_____
###Markdown
Loss functionRecall that the softmax model maps the input features $x$ to a user embedding $\psi(x) \in \mathbb R^d$, where $d$ is the embedding dimension. This vector is then multiplied by a movie embedding matrix $V \in \mathbb R^{m \times d}$ (where $m$ is the number of movies), and the final output of the model is the softmax of the product$$\hat p(x) = \text{softmax}(\psi(x) V^\top).$$Given a target label $y$, if we denote by $p = 1_y$ a one-hot encoding of this target label, then the loss is the cross-entropy between $\hat p(x)$ and $p$. Exercise 7: Write a loss function for the softmax model.In this exercise, we will write a function that takes tensors representing the user embeddings $\psi(x)$, movie embeddings $V$, target label $y$, and return the cross-entropy loss.Hint: You can use the function [`tf.nn.sparse_softmax_cross_entropy_with_logits`](https://www.tensorflow.org/api_docs/python/tf/nn/sparse_softmax_cross_entropy_with_logits), which takes `logits` as input, where `logits` refers to the product $\psi(x) V^\top$.
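To make the loss concrete, here is a small NumPy sketch (added for illustration, not part of the exercise) that computes the softmax cross-entropy for a single example by hand:
```python
import numpy as np

# One example: user embedding psi(x) of dimension d = 3, and 5 candidate movies.
psi_x = np.array([0.2, -0.1, 0.4])
V = np.random.randn(5, 3)          # movie embedding matrix
y = 2                              # target movie id

logits = V.dot(psi_x)                          # psi(x) V^T, shape [5]
p_hat = np.exp(logits) / np.exp(logits).sum()  # softmax probabilities
loss = -np.log(p_hat[y])                       # cross-entropy with the one-hot target 1_y
print('cross-entropy loss:', loss)
```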
###Code
def softmax_loss(user_embeddings, movie_embeddings, labels):
"""Returns the cross-entropy loss of the softmax model.
Args:
user_embeddings: A tensor of shape [batch_size, embedding_dim].
movie_embeddings: A tensor of shape [num_movies, embedding_dim].
    labels: A tensor of shape [batch_size], such that labels[i] is the target
      label for example i.
Returns:
The mean cross-entropy loss.
"""
# ========================= Complete this section ============================
# logits =
# loss =
# ============================================================================
return loss
# @title Solution
def softmax_loss(user_embeddings, movie_embeddings, labels):
"""Returns the cross-entropy loss of the softmax model.
Args:
user_embeddings: A tensor of shape [batch_size, embedding_dim].
movie_embeddings: A tensor of shape [num_movies, embedding_dim].
labels: A tensor of [batch_size], such that labels[i] is the target label
for example i.
Returns:
The mean cross-entropy loss.
"""
  # Verify that the embeddings have compatible dimensions
user_emb_dim = user_embeddings.shape[1].value
movie_emb_dim = movie_embeddings.shape[1].value
if user_emb_dim != movie_emb_dim:
raise ValueError(
"The user embedding dimension %d should match the movie embedding "
"dimension % d" % (user_emb_dim, movie_emb_dim))
logits = tf.matmul(user_embeddings, movie_embeddings, transpose_b=True)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels))
return loss
###Output
_____no_output_____
###Markdown
Exercise 8: Build a softmax model, train it, and inspect its embeddings.We are now ready to build a softmax CFModel. Complete the `build_softmax_model` function in the next cell. The architecture of the model is defined in the function `create_user_embeddings` and illustrated in the figure below. The input embeddings (movie_id, genre and year) are concatenated to form the input layer, then we have hidden layers with dimensions specified by the `hidden_dims` argument. Finally, the last hidden layer is multiplied by the movie embeddings to obtain the logits layer. For the target label, we will use a randomly-sampled movie_id from the list of movies the user rated.Complete the function below by creating the feature columns and embedding columns, then creating the loss tensors both for the train and test sets (using the `softmax_loss` function of the previous exercise).
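As a shape-level sketch of the architecture described above (illustrative only, with hypothetical sizes; the actual model below uses TensorFlow feature columns and variables), the data flows as follows:
```python
import numpy as np

# Hypothetical sizes: batch of 4 users, concatenated input embeddings of dim 40,
# one hidden layer of dim 35, and 1682 movies.
batch_size, input_dim, hidden_dim, num_movies = 4, 40, 35, 1682
inputs = np.random.randn(batch_size, input_dim)      # concatenated movie_id/genre/year embeddings
W = np.random.randn(input_dim, hidden_dim) * 0.01    # hidden-layer weights
user_emb = inputs.dot(W)                             # psi(x), shape [batch_size, hidden_dim]
movie_emb = np.random.randn(num_movies, hidden_dim)  # movie embedding matrix V
logits = user_emb.dot(movie_emb.T)                   # shape [batch_size, num_movies]
print(user_emb.shape, logits.shape)
```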
###Code
def build_softmax_model(rated_movies, embedding_cols, hidden_dims):
"""Builds a Softmax model for MovieLens.
Args:
    rated_movies: DataFrame of training examples.
embedding_cols: A dictionary mapping feature names (string) to embedding
column objects. This will be used in tf.feature_column.input_layer() to
create the input layer.
hidden_dims: int list of the dimensions of the hidden layers.
Returns:
A CFModel object.
"""
def create_network(features):
"""Maps input features dictionary to user embeddings.
Args:
features: A dictionary of input string tensors.
Returns:
outputs: A tensor of shape [batch_size, embedding_dim].
"""
# Create a bag-of-words embedding for each sparse feature.
inputs = tf.feature_column.input_layer(features, embedding_cols)
# Hidden layers.
input_dim = inputs.shape[1].value
for i, output_dim in enumerate(hidden_dims):
w = tf.get_variable(
"hidden%d_w_" % i, shape=[input_dim, output_dim],
initializer=tf.truncated_normal_initializer(
stddev=1./np.sqrt(output_dim))) / 10.
outputs = tf.matmul(inputs, w)
input_dim = output_dim
inputs = outputs
return outputs
train_rated_movies, test_rated_movies = split_dataframe(rated_movies)
train_batch = make_batch(train_rated_movies, 200)
test_batch = make_batch(test_rated_movies, 100)
with tf.variable_scope("model", reuse=False):
# Train
train_user_embeddings = create_network(train_batch)
train_labels = select_random(train_batch["label"])
with tf.variable_scope("model", reuse=True):
# Test
test_user_embeddings = create_network(test_batch)
test_labels = select_random(test_batch["label"])
movie_embeddings = tf.get_variable(
"input_layer/movie_id_embedding/embedding_weights")
# ========================= Complete this section ============================
# train_loss =
# test_loss =
# test_precision_at_10 =
# ============================================================================
metrics = (
{"train_loss": train_loss, "test_loss": test_loss},
{"test_precision_at_10": test_precision_at_10}
)
embeddings = {"movie_id": movie_embeddings}
return CFModel(embeddings, train_loss, metrics)
# @title Solution
def build_softmax_model(rated_movies, embedding_cols, hidden_dims):
"""Builds a Softmax model for MovieLens.
Args:
    rated_movies: DataFrame of training examples.
embedding_cols: A dictionary mapping feature names (string) to embedding
column objects. This will be used in tf.feature_column.input_layer() to
create the input layer.
hidden_dims: int list of the dimensions of the hidden layers.
Returns:
A CFModel object.
"""
def create_network(features):
"""Maps input features dictionary to user embeddings.
Args:
features: A dictionary of input string tensors.
Returns:
outputs: A tensor of shape [batch_size, embedding_dim].
"""
# Create a bag-of-words embedding for each sparse feature.
inputs = tf.feature_column.input_layer(features, embedding_cols)
# Hidden layers.
input_dim = inputs.shape[1].value
for i, output_dim in enumerate(hidden_dims):
w = tf.get_variable(
"hidden%d_w_" % i, shape=[input_dim, output_dim],
initializer=tf.truncated_normal_initializer(
stddev=1./np.sqrt(output_dim))) / 10.
outputs = tf.matmul(inputs, w)
input_dim = output_dim
inputs = outputs
return outputs
train_rated_movies, test_rated_movies = split_dataframe(rated_movies)
train_batch = make_batch(train_rated_movies, 200)
test_batch = make_batch(test_rated_movies, 100)
with tf.variable_scope("model", reuse=False):
# Train
train_user_embeddings = create_network(train_batch)
train_labels = select_random(train_batch["label"])
with tf.variable_scope("model", reuse=True):
# Test
test_user_embeddings = create_network(test_batch)
test_labels = select_random(test_batch["label"])
movie_embeddings = tf.get_variable(
"input_layer/movie_id_embedding/embedding_weights")
test_loss = softmax_loss(
test_user_embeddings, movie_embeddings, test_labels)
train_loss = softmax_loss(
train_user_embeddings, movie_embeddings, train_labels)
_, test_precision_at_10 = tf.metrics.precision_at_k(
labels=test_labels,
predictions=tf.matmul(test_user_embeddings, movie_embeddings, transpose_b=True),
k=10)
metrics = (
{"train_loss": train_loss, "test_loss": test_loss},
{"test_precision_at_10": test_precision_at_10}
)
embeddings = {"movie_id": movie_embeddings}
return CFModel(embeddings, train_loss, metrics)
###Output
_____no_output_____
###Markdown
Train the Softmax modelWe are now ready to train the softmax model. You can set the following hyperparameters:- learning rate- number of iterations. Note: you can run `softmax_model.train()` again to continue training the model from its current state.- input embedding dimensions (the `embedding_dim` argument passed to `make_embedding_col` below)- number of hidden layers and size of each layer (the `hidden_dims` argument)Note: since our input features are string-valued (movie_id, genre, and year), we need to map them to integer ids. This is done using [`tf.feature_column.categorical_column_with_vocabulary_list`](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_list), which takes a vocabulary list specifying all the values the feature can take. Then each id is mapped to an embedding vector using [`tf.feature_column.embedding_column`](https://www.tensorflow.org/api_docs/python/tf/feature_column/embedding_column).
###Code
# Create feature embedding columns
def make_embedding_col(key, embedding_dim):
categorical_col = tf.feature_column.categorical_column_with_vocabulary_list(
key=key, vocabulary_list=list(set(movies[key].values)), num_oov_buckets=0)
return tf.feature_column.embedding_column(
categorical_column=categorical_col, dimension=embedding_dim,
      # default initializer: truncated normal with stddev=1/sqrt(dimension)
combiner='mean')
with tf.Graph().as_default():
softmax_model = build_softmax_model(
rated_movies,
embedding_cols=[
make_embedding_col("movie_id", 35),
make_embedding_col("genre", 3),
make_embedding_col("year", 2),
],
hidden_dims=[35])
softmax_model.train(
learning_rate=8., num_iterations=3000, optimizer=tf.train.AdagradOptimizer)
###Output
_____no_output_____
###Markdown
Inspect the embeddingsWe can inspect the movie embeddings as we did for the previous models. Note that in this case, the movie embeddings are used at the same time as input embeddings (for the bag of words representation of the user history), and as softmax weights.
###Code
movie_neighbors(softmax_model, "Aladdin", DOT)
movie_neighbors(softmax_model, "Aladdin", COSINE)
movie_embedding_norm([reg_model, softmax_model])
tsne_movie_embeddings(softmax_model)
###Output
_____no_output_____
###Markdown
Copyright 2018 Google LLC.
###Code
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Recommendation Systems with TensorFlowThis Colab notebook complements the course on [Recommendation Systems](https://developers.google.com/machine-learning/recommendation/). Specifically, we'll be using matrix factorization to learn user and movie embeddings. IntroductionWe will create a movie recommendation system based on the [MovieLens](https://movielens.org/) dataset available [here](http://grouplens.org/datasets/movielens/). The data consists of movie ratings (on a scale of 1 to 5). Outline 1. Exploring the MovieLens Data (10 minutes) 1. Preliminaries (25 minutes) 1. Training a matrix factorization model (15 minutes) 1. Inspecting the Embeddings (15 minutes) 1. Regularization in matrix factorization (15 minutes) 1. Softmax model training (30 minutes) SetupLet's get started by importing the required packages.
###Code
# @title Imports (run this cell)
from __future__ import print_function
import numpy as np
import pandas as pd
import collections
from mpl_toolkits.mplot3d import Axes3D
from IPython import display
from matplotlib import pyplot as plt
import sklearn
import sklearn.manifold
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
# Add some convenience functions to Pandas DataFrame.
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.3f}'.format
def mask(df, key, function):
"""Returns a filtered dataframe, by applying function to key"""
return df[function(df[key])]
def flatten_cols(df):
df.columns = [' '.join(col).strip() for col in df.columns.values]
return df
pd.DataFrame.mask = mask
pd.DataFrame.flatten_cols = flatten_cols
# Install Altair and activate its colab renderer.
print("Installing Altair...")
!pip install git+git://github.com/altair-viz/altair.git
import altair as alt
alt.data_transformers.enable('default', max_rows=None)
alt.renderers.enable('colab')
print("Done installing Altair.")
# Install spreadsheets and import authentication module.
USER_RATINGS = False
!pip install --upgrade -q gspread
from google.colab import auth
import gspread
from oauth2client.client import GoogleCredentials
###Output
_____no_output_____
###Markdown
We then download the MovieLens Data, and create DataFrames containing movies, users, and ratings.
###Code
# @title Load the MovieLens data (run this cell).
# Download MovieLens data.
print("Downloading movielens data...")
import urllib
import zipfile
urllib.urlretrieve("http://files.grouplens.org/datasets/movielens/ml-100k.zip", "movielens.zip")
zip_ref = zipfile.ZipFile('movielens.zip', "r")
zip_ref.extractall()
print("Done. Dataset contains:")
print(zip_ref.read('ml-100k/u.info'))
# Load each data set (users, movies, and ratings).
users_cols = ['user_id', 'age', 'sex', 'occupation', 'zip_code']
users = pd.read_csv(
'ml-100k/u.user', sep='|', names=users_cols, encoding='latin-1')
ratings_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp']
ratings = pd.read_csv(
'ml-100k/u.data', sep='\t', names=ratings_cols, encoding='latin-1')
# The movies file contains a binary feature for each genre.
genre_cols = [
"genre_unknown", "Action", "Adventure", "Animation", "Children", "Comedy",
"Crime", "Documentary", "Drama", "Fantasy", "Film-Noir", "Horror",
"Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western"
]
movies_cols = [
'movie_id', 'title', 'release_date', "video_release_date", "imdb_url"
] + genre_cols
movies = pd.read_csv(
'ml-100k/u.item', sep='|', names=movies_cols, encoding='latin-1')
# Since the ids start at 1, we shift them to start at 0.
users["user_id"] = users["user_id"].apply(lambda x: str(x-1))
movies["movie_id"] = movies["movie_id"].apply(lambda x: str(x-1))
movies["year"] = movies['release_date'].apply(lambda x: str(x).split('-')[-1])
ratings["movie_id"] = ratings["movie_id"].apply(lambda x: str(x-1))
ratings["user_id"] = ratings["user_id"].apply(lambda x: str(x-1))
ratings["rating"] = ratings["rating"].apply(lambda x: float(x))
# Compute the number of movies to which a genre is assigned.
genre_occurences = movies[genre_cols].sum().to_dict()
# Since some movies can belong to more than one genre, we create different
# 'genre' columns as follows:
# - all_genres: all the active genres of the movie.
# - genre: randomly sampled from the active genres.
def mark_genres(movies, genres):
def get_random_genre(gs):
active = [genre for genre, g in zip(genres, gs) if g==1]
if len(active) == 0:
return 'Other'
return np.random.choice(active)
def get_all_genres(gs):
active = [genre for genre, g in zip(genres, gs) if g==1]
if len(active) == 0:
return 'Other'
return '-'.join(active)
movies['genre'] = [
get_random_genre(gs) for gs in zip(*[movies[genre] for genre in genres])]
movies['all_genres'] = [
get_all_genres(gs) for gs in zip(*[movies[genre] for genre in genres])]
mark_genres(movies, genre_cols)
# Create one merged DataFrame containing all the movielens data.
movielens = ratings.merge(movies, on='movie_id').merge(users, on='user_id')
# Utility to split the data into training and test sets.
def split_dataframe(df, holdout_fraction=0.1):
"""Splits a DataFrame into training and test sets.
Args:
df: a dataframe.
holdout_fraction: fraction of dataframe rows to use in the test set.
Returns:
train: dataframe for training
test: dataframe for testing
"""
test = df.sample(frac=holdout_fraction, replace=False)
train = df[~df.index.isin(test.index)]
return train, test
###Output
_____no_output_____
###Markdown
I. Exploring the Movielens DataBefore we dive into model building, let's inspect our MovieLens dataset. It is usually helpful to understand the statistics of the dataset. UsersWe start by printing some basic statistics describing the numeric user features.
###Code
users.describe()
###Output
_____no_output_____
###Markdown
We can also print some basic statistics describing the categorical user features
###Code
users.describe(include=[np.object])
###Output
_____no_output_____
###Markdown
We can also create histograms to further understand the distribution of the users. We use Altair to create an interactive chart.
###Code
# @title Altair visualization code (run this cell)
# The following functions are used to generate interactive Altair charts.
# We will display histograms of the data, sliced by a given attribute.
# Create filters to be used to slice the data.
occupation_filter = alt.selection_multi(fields=["occupation"])
occupation_chart = alt.Chart().mark_bar().encode(
x="count()",
y=alt.Y("occupation:N"),
color=alt.condition(
occupation_filter,
alt.Color("occupation:N", scale=alt.Scale(scheme='category20')),
alt.value("lightgray")),
).properties(width=300, height=300, selection=occupation_filter)
# A function that generates a histogram of filtered data.
def filtered_hist(field, label, filter):
"""Creates a layered chart of histograms.
The first layer (light gray) contains the histogram of the full data, and the
second contains the histogram of the filtered data.
Args:
field: the field for which to generate the histogram.
label: String label of the histogram.
filter: an alt.Selection object to be used to filter the data.
"""
base = alt.Chart().mark_bar().encode(
x=alt.X(field, bin=alt.Bin(maxbins=10), title=label),
y="count()",
).properties(
width=300,
)
return alt.layer(
base.transform_filter(filter),
base.encode(color=alt.value('lightgray'), opacity=alt.value(.7)),
).resolve_scale(y='independent')
###Output
_____no_output_____
###Markdown
Next, we look at the distribution of ratings per user. Clicking on an occupation in the right chart will filter the data by that occupation. The corresponding histogram is shown in blue, and superimposed with the histogram for the whole data (in light gray). You can use SHIFT+click to select multiple subsets.What do you observe, and how might this affect the recommendations?
###Code
users_ratings = (
ratings
.groupby('user_id', as_index=False)
.agg({'rating': ['count', 'mean']})
.flatten_cols()
.merge(users, on='user_id')
)
# Create a chart for the count, and one for the mean.
alt.hconcat(
filtered_hist('rating count', '# ratings / user', occupation_filter),
filtered_hist('rating mean', 'mean user rating', occupation_filter),
occupation_chart,
data=users_ratings)
###Output
_____no_output_____
###Markdown
MoviesIt is also useful to look at information about the movies and their ratings.
###Code
movies_ratings = movies.merge(
ratings
.groupby('movie_id', as_index=False)
.agg({'rating': ['count', 'mean']})
.flatten_cols(),
on='movie_id')
genre_filter = alt.selection_multi(fields=['genre'])
genre_chart = alt.Chart().mark_bar().encode(
x="count()",
y=alt.Y('genre'),
color=alt.condition(
genre_filter,
alt.Color("genre:N"),
alt.value('lightgray'))
).properties(height=300, selection=genre_filter)
(movies_ratings[['title', 'rating count', 'rating mean']]
.sort_values('rating count', ascending=False)
.head(10))
(movies_ratings[['title', 'rating count', 'rating mean']]
.mask('rating count', lambda x: x > 20)
.sort_values('rating mean', ascending=False)
.head(10))
###Output
_____no_output_____
###Markdown
Finally, the last chart shows the distribution of the number of ratings and average rating.
###Code
# Display the number of ratings and average rating per movie.
alt.hconcat(
filtered_hist('rating count', '# ratings / movie', genre_filter),
filtered_hist('rating mean', 'mean movie rating', genre_filter),
genre_chart,
data=movies_ratings)
###Output
_____no_output_____
###Markdown
II. PreliminariesOur goal is to factorize the ratings matrix $A$ into the product of a user embedding matrix $U$ and movie embedding matrix $V$, such that $A \approx UV^\top$ with$U = \begin{bmatrix} u_{1} \\ \hline \vdots \\ \hline u_{N} \end{bmatrix}$ and$V = \begin{bmatrix} v_{1} \\ \hline \vdots \\ \hline v_{M} \end{bmatrix}$.Here- $N$ is the number of users,- $M$ is the number of movies,- $A_{ij}$ is the rating of the $j$th movie by the $i$th user,- each row $U_i$ is a $d$-dimensional vector (embedding) representing user $i$,- each row $V_j$ is a $d$-dimensional vector (embedding) representing movie $j$,- the prediction of the model for the $(i, j)$ pair is the dot product $\langle U_i, V_j \rangle$. Sparse Representation of the Rating MatrixThe rating matrix could be very large and, in general, most of the entries are unobserved, since a given user will only rate a small subset of movies. For efficient representation, we will use a [tf.SparseTensor](https://www.tensorflow.org/api_docs/python/tf/SparseTensor). A `SparseTensor` uses three tensors to represent the matrix: `tf.SparseTensor(indices, values, dense_shape)` represents a tensor, where a value $A_{ij} = a$ is encoded by setting `indices[k] = [i, j]` and `values[k] = a`. The last tensor `dense_shape` is used to specify the shape of the full underlying matrix. Toy exampleAssume we have $2$ users and $4$ movies. Our toy ratings dataframe has three ratings,user\_id | movie\_id | rating--:|--:|--:0 | 0 | 5.00 | 1 | 3.01 | 3 | 1.0The corresponding rating matrix is$$A =\begin{bmatrix}5.0 & 3.0 & 0 & 0 \\0 & 0 & 0 & 1.0\end{bmatrix}$$And the SparseTensor representation is,```pythonSparseTensor( indices=[[0, 0], [0, 1], [1,3]], values=[5.0, 3.0, 1.0], dense_shape=[2, 4])``` Exercise 1: Build a tf.SparseTensor representation of the Rating Matrix.In this exercise, we'll write a function that maps from our `ratings` DataFrame to a `tf.SparseTensor`.Hint: you can select the values of a given column of a DataFrame `df` using `df['column_name'].values`.
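To see that this encoding really describes the toy matrix above, here is a small check (an added sketch, not part of the exercise) that rebuilds the dense matrix from the `(indices, values, dense_shape)` triplet:
```python
import numpy as np

indices = [[0, 0], [0, 1], [1, 3]]
values = [5.0, 3.0, 1.0]
dense_shape = (2, 4)

A = np.zeros(dense_shape)
for (i, j), a in zip(indices, values):
  A[i, j] = a
print(A)
# [[5. 3. 0. 0.]
#  [0. 0. 0. 1.]]
```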
###Code
def build_rating_sparse_tensor(ratings_df):
"""
Args:
ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns.
Returns:
A tf.SparseTensor representing the ratings matrix.
"""
# ========================= Complete this section ============================
# indices =
# values =
# ============================================================================
return tf.SparseTensor(
indices=indices,
values=values,
dense_shape=[users.shape[0], movies.shape[0]])
#@title Solution
def build_rating_sparse_tensor(ratings_df):
"""
Args:
ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns.
Returns:
a tf.SparseTensor representing the ratings matrix.
"""
indices = ratings_df[['user_id', 'movie_id']].values
values = ratings_df['rating'].values
return tf.SparseTensor(
indices=indices,
values=values,
dense_shape=[users.shape[0], movies.shape[0]])
###Output
_____no_output_____
###Markdown
Calculating the errorThe model approximates the ratings matrix $A$ by a low-rank product $UV^\top$. We need a way to measure the approximation error. We'll start by using the Mean Squared Error of observed entries only (we will revisit this later). It is defined as$$\begin{align*}\text{MSE}(A, UV^\top)&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - (UV^\top)_{ij})^2} \\&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - \langle U_i, V_j\rangle)^2}\end{align*}$$where $\Omega$ is the set of observed ratings, and $|\Omega|$ is the cardinality of $\Omega$. Exercise 2: Mean Squared ErrorWrite a TensorFlow function that takes a sparse rating matrix $A$ and the two embedding matrices $U, V$ and returns the mean squared error $\text{MSE}(A, UV^\top)$.Hints: * in this section, we only consider observed entries when calculating the loss. * a `SparseTensor` `sp_x` is a tuple of three Tensors: `sp_x.indices`, `sp_x.values` and `sp_x.dense_shape`. * you may find [`tf.gather_nd`](https://www.tensorflow.org/api_docs/python/tf/gather_nd) and [`tf.losses.mean_squared_error`](https://www.tensorflow.org/api_docs/python/tf/losses/mean_squared_error) helpful.
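A NumPy analogue on the toy example (an added sketch; the exercise itself should be solved with the TensorFlow ops in the hints) may clarify what is being computed: gather the embeddings of the observed $(i, j)$ pairs, take dot products, and average the squared errors.
```python
import numpy as np

indices = np.array([[0, 0], [0, 1], [1, 3]])   # observed (user, movie) pairs
values = np.array([5.0, 3.0, 1.0])             # observed ratings
U = np.random.randn(2, 2)                      # 2 users, embedding dim 2
V = np.random.randn(4, 2)                      # 4 movies, embedding dim 2

preds = (U[indices[:, 0]] * V[indices[:, 1]]).sum(axis=1)  # <U_i, V_j> per observed pair
mse = ((values - preds) ** 2).mean()
print('observed-entries MSE:', mse)
```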
###Code
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
# ========================= Complete this section ============================
# loss =
# ============================================================================
return loss
#@title Solution
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
predictions = tf.gather_nd(
tf.matmul(user_embeddings, movie_embeddings, transpose_b=True),
sparse_ratings.indices)
loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions)
return loss
###Output
_____no_output_____
###Markdown
Note: One approach is to compute the full prediction matrix $UV^\top$, then gather the entries corresponding to the observed pairs. The memory cost of this approach is $O(NM)$. For the MovieLens dataset, this is fine, as the dense $N \times M$ matrix is small enough to fit in memory ($N = 943$, $M = 1682$).Another approach (given in the alternate solution below) is to only gather the embeddings of the observed pairs, then compute their dot products. The memory cost is $O(|\Omega| d)$ where $d$ is the embedding dimension. In our case, $|\Omega| = 10^5$, and the embedding dimension is on the order of $10$, so the memory cost of both methods is comparable. But when the number of users or movies is much larger, the first approach becomes infeasible.
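A rough back-of-the-envelope check of these memory costs for the MovieLens sizes (illustrative arithmetic, assuming 4-byte floats):
```python
# N = 943 users, M = 1682 movies, |Omega| ~ 10^5 observed ratings, d = 10.
N, M, num_obs, d = 943, 1682, 100000, 10
full_matrix_mb = N * M * 4 / 1e6            # materializing U V^T: O(N * M)
gathered_mb = num_obs * d * 2 * 4 / 1e6     # gathering U_i and V_j per observed pair: O(|Omega| * d)
print('full prediction matrix: %.1f MB' % full_matrix_mb)
print('gathered embeddings:    %.1f MB' % gathered_mb)
```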
###Code
#@title Alternate Solution
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
predictions = tf.reduce_sum(
tf.gather(user_embeddings, sparse_ratings.indices[:, 0]) *
tf.gather(movie_embeddings, sparse_ratings.indices[:, 1]),
axis=1)
loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions)
return loss
###Output
_____no_output_____
###Markdown
Exercise 3 (Optional): adding your own ratings to the data set You have the option to add your own ratings to the data set. If you choose to do so, you will be able to see recommendations for yourself.Start by checking the box below. Running the next cell will authenticate you to your Google Drive account and create a spreadsheet that contains all movie titles in column 'A'. Follow the link to the spreadsheet and take 3 minutes to rate some of the movies. Your ratings should be entered in column 'B'.
###Code
USER_RATINGS = True #@param {type:"boolean"}
# @title Run to create a spreadsheet, then use it to enter your ratings.
# Authenticate user.
if USER_RATINGS:
auth.authenticate_user()
gc = gspread.authorize(GoogleCredentials.get_application_default())
# Create the spreadsheet and print a link to it.
try:
sh = gc.open('MovieLens-test')
except(gspread.SpreadsheetNotFound):
sh = gc.create('MovieLens-test')
worksheet = sh.sheet1
titles = movies['title'].values
cell_list = worksheet.range(1, 1, len(titles), 1)
for cell, title in zip(cell_list, titles):
cell.value = title
worksheet.update_cells(cell_list)
print("Link to the spreadsheet: "
"https://docs.google.com/spreadsheets/d/{}/edit".format(sh.id))
###Output
_____no_output_____
###Markdown
Run the next cell to load your ratings and add them to the main `ratings` DataFrame.
###Code
# @title Run to load your ratings.
# Load the ratings from the spreadsheet and create a DataFrame.
if USER_RATINGS:
my_ratings = pd.DataFrame.from_records(worksheet.get_all_values()).reset_index()
my_ratings = my_ratings[my_ratings[1] != '']
my_ratings = pd.DataFrame({
'user_id': "943",
'movie_id': map(str, my_ratings['index']),
'rating': map(float, my_ratings[1]),
})
# Remove previous ratings.
ratings = ratings[ratings.user_id != "943"]
# Add new ratings.
ratings = ratings.append(my_ratings, ignore_index=True)
# Add new user to the users DataFrame.
if users.shape[0] == 943:
users = users.append(users.iloc[942], ignore_index=True)
users["user_id"][943] = "943"
print("Added your %d ratings; you have great taste!" % len(my_ratings))
ratings[ratings.user_id=="943"].merge(movies[['movie_id', 'title']])
###Output
_____no_output_____
###Markdown
III. Training a Matrix Factorization model CFModel (Collaborative Filtering Model) helper classThis is a simple class to train a matrix factorization model using stochastic gradient descent.The class constructor takes- a dictionary of embedding variables, mapping a name (such as "user_id" or "movie_id") to a `tf.Variable` of embeddings,- a loss to optimize (a `tf.Tensor`),- an optional list of metrics dictionaries, each mapping a string (the name of the metric) to a tensor. These are evaluated and plotted during training (e.g. training error and test error).After training, one can access the trained embeddings using the `model.embeddings` dictionary.Example usage:```U_var = ...V_var = ...loss = ...model = CFModel({'user_id': U_var, 'movie_id': V_var}, loss)model.train(num_iterations=100, learning_rate=1.0)user_embeddings = model.embeddings['user_id']movie_embeddings = model.embeddings['movie_id']```
###Code
# @title CFModel helper class (run this cell)
class CFModel(object):
"""Simple class that represents a collaborative filtering model"""
def __init__(self, embedding_vars, loss, metrics=None):
"""Initializes a CFModel.
Args:
embedding_vars: A dictionary of tf.Variables.
loss: A float Tensor. The loss to optimize.
metrics: optional list of dictionaries of Tensors. The metrics in each
dictionary will be plotted in a separate figure during training.
"""
self._embedding_vars = embedding_vars
self._loss = loss
self._metrics = metrics
self._embeddings = {k: None for k in embedding_vars}
self._session = None
@property
def embeddings(self):
"""The embeddings dictionary."""
return self._embeddings
def train(self, num_iterations=100, learning_rate=1.0, plot_results=True,
optimizer=tf.train.GradientDescentOptimizer):
"""Trains the model.
Args:
      num_iterations: number of iterations to run.
learning_rate: optimizer learning rate.
plot_results: whether to plot the results at the end of training.
optimizer: the optimizer to use. Default to GradientDescentOptimizer.
Returns:
The metrics dictionary evaluated at the last iteration.
"""
with self._loss.graph.as_default():
opt = optimizer(learning_rate)
train_op = opt.minimize(self._loss)
local_init_op = tf.group(
tf.variables_initializer(opt.variables()),
tf.local_variables_initializer())
if self._session is None:
self._session = tf.Session()
with self._session.as_default():
self._session.run(tf.global_variables_initializer())
self._session.run(tf.tables_initializer())
tf.train.start_queue_runners()
with self._session.as_default():
local_init_op.run()
iterations = []
metrics = self._metrics or ({},)
metrics_vals = [collections.defaultdict(list) for _ in self._metrics]
# Train and append results.
for i in range(num_iterations + 1):
_, results = self._session.run((train_op, metrics))
if (i % 10 == 0) or i == num_iterations:
print("\r iteration %d: " % i + ", ".join(
["%s=%f" % (k, v) for r in results for k, v in r.items()]),
end='')
iterations.append(i)
for metric_val, result in zip(metrics_vals, results):
for k, v in result.items():
metric_val[k].append(v)
for k, v in self._embedding_vars.items():
self._embeddings[k] = v.eval()
if plot_results:
# Plot the metrics.
num_subplots = len(metrics)+1
fig = plt.figure()
fig.set_size_inches(num_subplots*10, 8)
for i, metric_vals in enumerate(metrics_vals):
ax = fig.add_subplot(1, num_subplots, i+1)
for k, v in metric_vals.items():
ax.plot(iterations, v, label=k)
ax.set_xlim([1, num_iterations])
ax.legend()
return results
###Output
_____no_output_____
###Markdown
Exercise 4: Build a Matrix Factorization model and train itUsing your `sparse_mean_square_error` function, write a function that builds a `CFModel` by creating the embedding variables and the train and test losses.
###Code
def build_model(ratings, embedding_dim=3, init_stddev=1.):
"""
Args:
ratings: a DataFrame of the ratings
embedding_dim: the dimension of the embedding vectors.
init_stddev: float, the standard deviation of the random initial embeddings.
Returns:
model: a CFModel.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
# ========================= Complete this section ============================
# A_train =
# A_test =
# ============================================================================
# Initialize the embeddings using a normal distribution.
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
# ========================= Complete this section ============================
# train_loss =
# test_loss =
# ============================================================================
metrics = {
'train_error': train_loss,
'test_error': test_loss
}
embeddings = {
"user_id": U,
"movie_id": V
}
return CFModel(embeddings, train_loss, [metrics])
#@title Solution
def build_model(ratings, embedding_dim=3, init_stddev=1.):
"""
Args:
ratings: a DataFrame of the ratings
embedding_dim: the dimension of the embedding vectors.
init_stddev: float, the standard deviation of the random initial embeddings.
Returns:
model: a CFModel.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
A_train = build_rating_sparse_tensor(train_ratings)
A_test = build_rating_sparse_tensor(test_ratings)
# Initialize the embeddings using a normal distribution.
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
train_loss = sparse_mean_square_error(A_train, U, V)
test_loss = sparse_mean_square_error(A_test, U, V)
metrics = {
'train_error': train_loss,
'test_error': test_loss
}
embeddings = {
"user_id": U,
"movie_id": V
}
return CFModel(embeddings, train_loss, [metrics])
###Output
_____no_output_____
###Markdown
Great, now it's time to train the model!Go ahead and run the next cell, trying different parameters (embedding dimension, learning rate, iterations). The training and test errors are plotted at the end of training. You can inspect these values to validate the hyper-parameters.Note: by calling `model.train` again, the model will continue training starting from the current values of the embeddings.
###Code
# Build the CF model and train it.
model = build_model(ratings, embedding_dim=30, init_stddev=0.5)
model.train(num_iterations=1000, learning_rate=10.)
###Output
_____no_output_____
###Markdown
The movie and user embeddings are also displayed in the right figure. When the embedding dimension is greater than 3, the embeddings are projected on the first 3 dimensions. The next section will have a more detailed look at the embeddings. IV. Inspecting the EmbeddingsIn this section, we take a closer look at the learned embeddings, by- computing your recommendations- looking at the nearest neighbors of some movies,- looking at the norms of the movie embeddings,- visualizing the embedding in a projected embedding space. Exercise 5: Write a function that computes the scores of the candidatesWe start by writing a function that, given a query embedding $u \in \mathbb R^d$ and item embeddings $V \in \mathbb R^{N \times d}$, computes the item scores.As discussed in the lecture, there are different similarity measures we can use, and these can yield different results. We will compare the following:- dot product: the score of item j is $\langle u, V_j \rangle$.- cosine: the score of item j is $\frac{\langle u, V_j \rangle}{\|u\|\|V_j\|}$.Hints:- you can use [`np.dot`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html) to compute the product of two np.Arrays.- you can use [`np.linalg.norm`](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.norm.html) to compute the norm of a np.Array.
###Code
DOT = 'dot'
COSINE = 'cosine'
def compute_scores(query_embedding, item_embeddings, measure=DOT):
"""Computes the scores of the candidates given a query.
Args:
query_embedding: a vector of shape [k], representing the query embedding.
item_embeddings: a matrix of shape [N, k], such that row i is the embedding
of item i.
measure: a string specifying the similarity measure to be used. Can be
either DOT or COSINE.
Returns:
scores: a vector of shape [N], such that scores[i] is the score of item i.
"""
# ========================= Complete this section ============================
# scores =
# ============================================================================
return scores
#@title Solution
DOT = 'dot'
COSINE = 'cosine'
def compute_scores(query_embedding, item_embeddings, measure=DOT):
"""Computes the scores of the candidates given a query.
Args:
query_embedding: a vector of shape [k], representing the query embedding.
item_embeddings: a matrix of shape [N, k], such that row i is the embedding
of item i.
measure: a string specifying the similarity measure to be used. Can be
either DOT or COSINE.
Returns:
scores: a vector of shape [N], such that scores[i] is the score of item i.
"""
u = query_embedding
V = item_embeddings
if measure == COSINE:
V = V / np.linalg.norm(V, axis=1, keepdims=True)
u = u / np.linalg.norm(u)
scores = u.dot(V.T)
return scores
###Output
_____no_output_____
###Markdown
Equipped with this function, we can compute recommendations, where the query embedding can be either a user embedding or a movie embedding.
###Code
# @title User recommendations and nearest neighbors (run this cell)
def user_recommendations(model, measure=DOT, exclude_rated=False, k=6):
if USER_RATINGS:
scores = compute_scores(
model.embeddings["user_id"][943], model.embeddings["movie_id"], measure)
score_key = measure + ' score'
df = pd.DataFrame({
score_key: list(scores),
'movie_id': movies['movie_id'],
'titles': movies['title'],
'genres': movies['all_genres'],
})
if exclude_rated:
# remove movies that are already rated
rated_movies = ratings[ratings.user_id == "943"]["movie_id"].values
df = df[df.movie_id.apply(lambda movie_id: movie_id not in rated_movies)]
display.display(df.sort_values([score_key], ascending=False).head(k))
def movie_neighbors(model, title_substring, measure=DOT, k=6):
# Search for movie ids that match the given substring.
ids = movies[movies['title'].str.contains(title_substring)].index.values
titles = movies.iloc[ids]['title'].values
if len(titles) == 0:
raise ValueError("Found no movies with title %s" % title_substring)
print("Nearest neighbors of : %s." % titles[0])
if len(titles) > 1:
print("[Found more than one matching movie. Other candidates: {}]".format(
", ".join(titles[1:])))
movie_id = ids[0]
scores = compute_scores(
model.embeddings["movie_id"][movie_id], model.embeddings["movie_id"],
measure)
score_key = measure + ' score'
df = pd.DataFrame({
score_key: list(scores),
'titles': movies['title'],
'genres': movies['all_genres']
})
display.display(df.sort_values([score_key], ascending=False).head(k))
###Output
_____no_output_____
###Markdown
Your recommendationsIf you chose to input your own ratings, you can run the next cell to generate recommendations for you.
###Code
user_recommendations(model, measure=COSINE, k=5)
###Output
_____no_output_____
###Markdown
How do the recommendations look? Movie Nearest neighborsLet's look at the nearest neighbors for some of the movies.
###Code
movie_neighbors(model, "Aladdin", DOT)
movie_neighbors(model, "Aladdin", COSINE)
###Output
_____no_output_____
###Markdown
It seems that the quality of learned embeddings may not be very good. This will be addressed in Section V by adding several regularization techniques. First, we will further inspect the embeddings. Movie Embedding NormWe can also observe that the recommendations with dot-product and cosine are different: with dot-product, the model tends to recommend popular movies. This can be explained by the fact that in matrix factorization models, the norm of the embedding is often correlated with popularity (popular movies tend to have larger norms), so scoring with the dot product is more likely to recommend popular items. We can confirm this hypothesis by sorting the movies by their embedding norm, as done in the next cell.
###Code
# @title Embedding Visualization code (run this cell)
def movie_embedding_norm(models):
"""Visualizes the norm and number of ratings of the movie embeddings.
Args:
    models: A CFModel object, or a list of CFModel objects.
"""
if not isinstance(models, list):
models = [models]
df = pd.DataFrame({
'title': movies['title'],
'genre': movies['genre'],
'num_ratings': movies_ratings['rating count'],
})
charts = []
brush = alt.selection_interval()
for i, model in enumerate(models):
norm_key = 'norm'+str(i)
df[norm_key] = np.linalg.norm(model.embeddings["movie_id"], axis=1)
nearest = alt.selection(
type='single', encodings=['x', 'y'], on='mouseover', nearest=True,
empty='none')
base = alt.Chart().mark_circle().encode(
x='num_ratings',
y=norm_key,
color=alt.condition(brush, alt.value('#4c78a8'), alt.value('lightgray'))
).properties(
selection=nearest).add_selection(brush)
text = alt.Chart().mark_text(align='center', dx=5, dy=-5).encode(
x='num_ratings', y=norm_key,
text=alt.condition(nearest, 'title', alt.value('')))
charts.append(alt.layer(base, text))
return alt.hconcat(*charts, data=df)
def visualize_movie_embeddings(data, x, y):
nearest = alt.selection(
type='single', encodings=['x', 'y'], on='mouseover', nearest=True,
empty='none')
base = alt.Chart().mark_circle().encode(
x=x,
y=y,
color=alt.condition(genre_filter, "genre", alt.value("whitesmoke")),
).properties(
width=600,
height=600,
selection=nearest)
text = alt.Chart().mark_text(align='left', dx=5, dy=-5).encode(
x=x,
y=y,
text=alt.condition(nearest, 'title', alt.value('')))
return alt.hconcat(alt.layer(base, text), genre_chart, data=data)
def tsne_movie_embeddings(model):
"""Visualizes the movie embeddings, projected using t-SNE with Cosine measure.
Args:
    model: A CFModel object.
"""
tsne = sklearn.manifold.TSNE(
n_components=2, perplexity=40, metric='cosine', early_exaggeration=10.0,
init='pca', verbose=True, n_iter=400)
print('Running t-SNE...')
V_proj = tsne.fit_transform(model.embeddings["movie_id"])
movies.loc[:,'x'] = V_proj[:, 0]
movies.loc[:,'y'] = V_proj[:, 1]
return visualize_movie_embeddings(movies, 'x', 'y')
movie_embedding_norm(model)
###Output
_____no_output_____
###Markdown
Note: Depending on how the model is initialized, you may observe that some niche movies (ones with few ratings) have a high norm, leading to spurious recommendations. This can happen if the embedding of that movie happens to be initialized with a high norm. Then, because the movie has few ratings, it is infrequently updated, and can keep its high norm. This will be alleviated by using regularization.Try changing the value of the hyper-parameter `init_stddev`. One useful fact is that the expected norm of a $d$-dimensional vector with entries $\sim \mathcal N(0, \sigma^2)$ is approximately $\sigma \sqrt d$.How does this affect the embedding norm distribution, and the ranking of the top-norm movies?
###Code
#@title Solution
model_lowinit = build_model(ratings, embedding_dim=30, init_stddev=0.05)
model_lowinit.train(num_iterations=1000, learning_rate=10.)
movie_neighbors(model_lowinit, "Aladdin", DOT)
movie_neighbors(model_lowinit, "Aladdin", COSINE)
movie_embedding_norm([model, model_lowinit])
###Output
_____no_output_____
###Markdown
Embedding visualizationSince it is hard to visualize embeddings in a higher-dimensional space (when the embedding dimension $k > 3$), one approach is to project the embeddings to a lower-dimensional space. t-SNE (t-distributed Stochastic Neighbor Embedding) is an algorithm that projects the embeddings while attempting to preserve their pairwise distances. It can be useful for visualization, but one should use it with care. For more information on using t-SNE, see [How to Use t-SNE Effectively](https://distill.pub/2016/misread-tsne/).
###Code
tsne_movie_embeddings(model_lowinit)
###Output
_____no_output_____
###Markdown
You can highlight the embeddings of a given genre by clicking on the genres panel (SHIFT+click to select multiple genres).We can observe that the embeddings do not seem to have any notable structure, and the embeddings of a given genre are located all over the embedding space. This confirms the poor quality of the learned embeddings. One of the main reasons, which we will address in the next section, is that we only trained the model on observed pairs, and without regularization. V. Regularization In Matrix FactorizationIn the previous section, our loss was defined as the mean squared error on the observed part of the rating matrix. As discussed in the lecture, this can be problematic as the model does not learn how to place the embeddings of irrelevant movies. This phenomenon is known as *folding*.We will add regularization terms that will address this issue. We will use two types of regularization:- Regularization of the model parameters. This is a common $\ell_2$ regularization term on the embedding matrices, given by $r(U, V) = \frac{1}{N} \sum_i \|U_i\|^2 + \frac{1}{M}\sum_j \|V_j\|^2$.- A global prior that pushes the prediction of any pair towards zero, called the *gravity* term. This is given by $g(U, V) = \frac{1}{MN} \sum_{i = 1}^N \sum_{j = 1}^M \langle U_i, V_j \rangle^2$.The total loss is then given by$$\frac{1}{|\Omega|}\sum_{(i, j) \in \Omega} (A_{ij} - \langle U_i, V_j\rangle)^2 + \lambda _r r(U, V) + \lambda_g g(U, V)$$where $\lambda_r$ and $\lambda_g$ are two regularization coefficients (hyper-parameters). Exercise 6: Build a regularized Matrix Factorization model and train itWrite a function that builds a regularized model. You are given a function `gravity(U, V)` that computes the gravity term given the two embedding matrices $U$ and $V$.
###Code
def gravity(U, V):
"""Creates a gravity loss given two embedding matrices."""
return 1. / (U.shape[0].value*V.shape[0].value) * tf.reduce_sum(
tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True))
def build_regularized_model(
ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1.,
init_stddev=0.1):
"""
Args:
ratings: the DataFrame of movie ratings.
embedding_dim: The dimension of the embedding space.
regularization_coeff: The regularization coefficient lambda.
gravity_coeff: The gravity regularization coefficient lambda_g.
Returns:
A CFModel object that uses a regularized loss.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
A_train = build_rating_sparse_tensor(train_ratings)
A_test = build_rating_sparse_tensor(test_ratings)
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
# ========================= Complete this section ============================
# error_train =
# error_test =
# gravity_loss =
# regularization_loss =
# ============================================================================
total_loss = error_train + regularization_loss + gravity_loss
losses = {
'train_error': error_train,
'test_error': error_test,
}
loss_components = {
'observed_loss': error_train,
'regularization_loss': regularization_loss,
'gravity_loss': gravity_loss,
}
embeddings = {"user_id": U, "movie_id": V}
return CFModel(embeddings, total_loss, [losses, loss_components])
# @title Solution
def gravity(U, V):
"""Creates a gravity loss given two embedding matrices."""
return 1. / (U.shape[0].value*V.shape[0].value) * tf.reduce_sum(
tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True))
def build_regularized_model(
ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1.,
init_stddev=0.1):
"""
Args:
ratings: the DataFrame of movie ratings.
embedding_dim: The dimension of the embedding space.
regularization_coeff: The regularization coefficient lambda.
gravity_coeff: The gravity regularization coefficient lambda_g.
Returns:
A CFModel object that uses a regularized loss.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
A_train = build_rating_sparse_tensor(train_ratings)
A_test = build_rating_sparse_tensor(test_ratings)
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
error_train = sparse_mean_square_error(A_train, U, V)
error_test = sparse_mean_square_error(A_test, U, V)
gravity_loss = gravity_coeff * gravity(U, V)
regularization_loss = regularization_coeff * (
tf.reduce_sum(U*U)/U.shape[0].value + tf.reduce_sum(V*V)/V.shape[0].value)
total_loss = error_train + regularization_loss + gravity_loss
losses = {
'train_error_observed': error_train,
'test_error_observed': error_test,
}
loss_components = {
'observed_loss': error_train,
'regularization_loss': regularization_loss,
'gravity_loss': gravity_loss,
}
embeddings = {"user_id": U, "movie_id": V}
return CFModel(embeddings, total_loss, [losses, loss_components])
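# Added sanity check (not part of the original exercise): the gravity term above
# avoids materializing the full N x M prediction matrix by using the identity
#   sum_{i,j} <U_i, V_j>^2 == sum( (U^T U) * (V^T V) )
# A quick NumPy verification on small random matrices of arbitrary size:
_U = np.random.normal(size=(5, 3))
_V = np.random.normal(size=(7, 3))
_direct = np.sum(_U.dot(_V.T) ** 2)              # O(N*M*d): all pairwise dot products
_factored = np.sum(_U.T.dot(_U) * _V.T.dot(_V))  # O((N+M)*d^2): Gram-matrix form
assert np.allclose(_direct, _factored)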
###Output
_____no_output_____
###Markdown
It is now time to train the regularized model! You can try different values of the regularization coefficients, and different embedding dimensions.
###Code
reg_model = build_regularized_model(
ratings, regularization_coeff=0.1, gravity_coeff=1.0, embedding_dim=35,
init_stddev=.05)
reg_model.train(num_iterations=2000, learning_rate=20.)
###Output
_____no_output_____
###Markdown
Observe that adding the regularization terms results in a higher MSE, both on the training and test set. However, as we will see, the quality of the recommendations improves. This highlights a tension between fitting the observed data and minimizing the regularization terms. Fitting the observed data often emphasizes learning high similarity (between items with many interactions), but a good embedding representation also requires learning low similarity (between items with few or no interactions). Inspect the resultsLet's see if the results with regularization look better.
###Code
user_recommendations(reg_model, DOT, exclude_rated=True, k=10)
###Output
_____no_output_____
###Markdown
Hopefully, these recommendations look better. You can change the similarity measure from DOT to COSINE and observe how this affects the recommendations. Since the model is likely to recommend items that you rated highly, you have the option to exclude the items you rated, using `exclude_rated=True`. In the following cells, we display the nearest neighbors, the embedding norms, and the t-SNE projection of the movie embeddings.
###Code
movie_neighbors(reg_model, "Aladdin", DOT)
movie_neighbors(reg_model, "Aladdin", COSINE)
###Output
_____no_output_____
###Markdown
Here we compare the embedding norms for `model`, `model_lowinit`, and `reg_model`. Selecting a subset of the embeddings will highlight them on all charts simultaneously.
###Code
movie_embedding_norm([model, model_lowinit, reg_model])
# Visualize the embeddings
tsne_movie_embeddings(reg_model)
###Output
_____no_output_____
###Markdown
We should observe that the embeddings have a lot more structure than the unregularized case. Try selecting different genres and observe how they tend to form clusters (for example Horror, Animation and Children). ConclusionThis concludes this section on matrix factorization models. Note that while the scale of the problem is small enough to allow efficient training using SGD, many practical problems need to be trained using more specialized algorithms such as Alternating Least Squares (see [tf.contrib.factorization.WALSMatrixFactorization](https://www.tensorflow.org/api_docs/python/tf/contrib/factorization/WALSMatrixFactorization) for a TF implementation). VI. Softmax modelIn this section, we will train a simple softmax model that predicts whether a given user has rated a movie.**Note**: if you are taking the self-study version of the class, make sure to read through the part of the class covering Softmax training before working on this part.The model will take as input a feature vector $x$ representing the list of movies the user has rated. We start from the ratings DataFrame, which we group by user_id.
###Code
rated_movies = (ratings[["user_id", "movie_id"]]
.groupby("user_id", as_index=False)
.aggregate(lambda x: list(x)))
rated_movies.head()
###Output
_____no_output_____
###Markdown
We then create a function that generates an example batch, such that each example contains the following features:- movie_id: A tensor of strings of the movie ids that the user rated.- genre: A tensor of strings of the genres of those movies.- year: A tensor of strings of the release years of those movies.
###Code
#@title Batch generation code (run this cell)
years_dict = {
movie: year for movie, year in zip(movies["movie_id"], movies["year"])
}
genres_dict = {
movie: genres.split('-')
for movie, genres in zip(movies["movie_id"], movies["all_genres"])
}
def make_batch(ratings, batch_size):
"""Creates a batch of examples.
Args:
ratings: A DataFrame of ratings such that examples["movie_id"] is a list of
movies rated by a user.
batch_size: The batch size.
"""
def pad(x, fill):
return pd.DataFrame.from_dict(x).fillna(fill).values
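  # Note (added comment): each user has rated a different number of movies, so
  # the per-user lists are ragged. pad() stacks them into a rectangular array,
  # filling the missing entries with `fill` ("" for string features, -1 for labels).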
movie = []
year = []
genre = []
label = []
for movie_ids in ratings["movie_id"].values:
movie.append(movie_ids)
genre.append([x for movie_id in movie_ids for x in genres_dict[movie_id]])
year.append([years_dict[movie_id] for movie_id in movie_ids])
label.append([int(movie_id) for movie_id in movie_ids])
features = {
"movie_id": pad(movie, ""),
"year": pad(year, ""),
"genre": pad(genre, ""),
"label": pad(label, -1)
}
batch = (
tf.data.Dataset.from_tensor_slices(features)
.shuffle(1000)
.repeat()
.batch(batch_size)
.make_one_shot_iterator()
.get_next())
return batch
def select_random(x):
"""Selectes a random elements from each row of x."""
def to_float(x):
return tf.cast(x, tf.float32)
def to_int(x):
return tf.cast(x, tf.int64)
batch_size = tf.shape(x)[0]
rn = tf.range(batch_size)
nnz = to_float(tf.count_nonzero(x >= 0, axis=1))
rnd = tf.random_uniform([batch_size])
ids = tf.stack([to_int(rn), to_int(nnz * rnd)], axis=1)
return to_int(tf.gather_nd(x, ids))
###Output
_____no_output_____
###Markdown
Loss functionRecall that the softmax model maps the input features $x$ to a user embedding $\psi(x) \in \mathbb R^d$, where $d$ is the embedding dimension. This vector is then multiplied by a movie embedding matrix $V \in \mathbb R^{m \times d}$ (where $m$ is the number of movies), and the final output of the model is the softmax of the product$$\hat p(x) = \text{softmax}(\psi(x) V^\top).$$Given a target label $y$, if we denote by $p = 1_y$ a one-hot encoding of this target label, then the loss is the cross-entropy between $\hat p(x)$ and $p$. Exercise 7: Write a loss function for the softmax model.In this exercise, we will write a function that takes tensors representing the user embeddings $\psi(x)$, movie embeddings $V$, target label $y$, and return the cross-entropy loss.Hint: You can use the function [`tf.nn.sparse_softmax_cross_entropy_with_logits`](https://www.tensorflow.org/api_docs/python/tf/nn/sparse_softmax_cross_entropy_with_logits), which takes `logits` as input, where `logits` refers to the product $\psi(x) V^\top$.
###Code
def softmax_loss(user_embeddings, movie_embeddings, labels):
"""Returns the cross-entropy loss of the softmax model.
Args:
user_embeddings: A tensor of shape [batch_size, embedding_dim].
movie_embeddings: A tensor of shape [num_movies, embedding_dim].
    labels: A tensor of shape [batch_size], such that labels[i] is the target
      label for example i.
Returns:
The mean cross-entropy loss.
"""
# ========================= Complete this section ============================
# logits =
# loss =
# ============================================================================
return loss
# @title Solution
def softmax_loss(user_embeddings, movie_embeddings, labels):
"""Returns the cross-entropy loss of the softmax model.
Args:
user_embeddings: A tensor of shape [batch_size, embedding_dim].
movie_embeddings: A tensor of shape [num_movies, embedding_dim].
labels: A tensor of [batch_size], such that labels[i] is the target label
for example i.
Returns:
The mean cross-entropy loss.
"""
  # Verify that the embeddings have compatible dimensions
user_emb_dim = user_embeddings.shape[1].value
movie_emb_dim = movie_embeddings.shape[1].value
if user_emb_dim != movie_emb_dim:
raise ValueError(
"The user embedding dimension %d should match the movie embedding "
"dimension % d" % (user_emb_dim, movie_emb_dim))
logits = tf.matmul(user_embeddings, movie_embeddings, transpose_b=True)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels))
return loss
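# Added numeric illustration (not in the original notebook): for a single example,
# sparse softmax cross-entropy is just -log(softmax(logits)[label]). The logits
# below are arbitrary made-up numbers.
_logits = np.array([2.0, 1.0, 0.1])
_label = 0
_log_softmax = _logits - np.log(np.sum(np.exp(_logits)))
print("cross-entropy for label %d: %.4f" % (_label, -_log_softmax[_label]))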
###Output
_____no_output_____
###Markdown
Exercise 8: Build a softmax model, train it, and inspect its embeddings.We are now ready to build a softmax CFModel. Complete the `build_softmax_model` function in the next cell. The architecture of the model is defined in the function `create_user_embeddings` and illustrated in the figure below. The input embeddings (movie_id, genre and year) are concatenated to form the input layer, then we have hidden layers with dimensions specified by the `hidden_dims` argument. Finally, the last hidden layer is multiplied by the movie embeddings to obtain the logits layer. For the target label, we will use a randomly-sampled movie_id from the list of movies the user rated.Complete the function below by creating the feature columns and embedding columns, then creating the loss tensors both for the train and test sets (using the `softmax_loss` function of the previous exercise).
###Code
def build_softmax_model(rated_movies, embedding_cols, hidden_dims):
"""Builds a Softmax model for MovieLens.
Args:
    rated_movies: DataFrame of training examples.
embedding_cols: A dictionary mapping feature names (string) to embedding
column objects. This will be used in tf.feature_column.input_layer() to
create the input layer.
hidden_dims: int list of the dimensions of the hidden layers.
Returns:
A CFModel object.
"""
def create_network(features):
"""Maps input features dictionary to user embeddings.
Args:
features: A dictionary of input string tensors.
Returns:
outputs: A tensor of shape [batch_size, embedding_dim].
"""
# Create a bag-of-words embedding for each sparse feature.
inputs = tf.feature_column.input_layer(features, embedding_cols)
# Hidden layers.
input_dim = inputs.shape[1].value
for i, output_dim in enumerate(hidden_dims):
w = tf.get_variable(
"hidden%d_w_" % i, shape=[input_dim, output_dim],
initializer=tf.truncated_normal_initializer(
stddev=1./np.sqrt(output_dim))) / 10.
outputs = tf.matmul(inputs, w)
input_dim = output_dim
inputs = outputs
return outputs
train_rated_movies, test_rated_movies = split_dataframe(rated_movies)
train_batch = make_batch(train_rated_movies, 200)
test_batch = make_batch(test_rated_movies, 100)
with tf.variable_scope("model", reuse=False):
# Train
train_user_embeddings = create_network(train_batch)
train_labels = select_random(train_batch["label"])
with tf.variable_scope("model", reuse=True):
# Test
test_user_embeddings = create_network(test_batch)
test_labels = select_random(test_batch["label"])
movie_embeddings = tf.get_variable(
"input_layer/movie_id_embedding/embedding_weights")
# ========================= Complete this section ============================
# train_loss =
# test_loss =
# test_precision_at_10 =
# ============================================================================
metrics = (
{"train_loss": train_loss, "test_loss": test_loss},
{"test_precision_at_10": test_precision_at_10}
)
embeddings = {"movie_id": movie_embeddings}
return CFModel(embeddings, train_loss, metrics)
# @title Solution
def build_softmax_model(rated_movies, embedding_cols, hidden_dims):
"""Builds a Softmax model for MovieLens.
Args:
    rated_movies: DataFrame of training examples.
embedding_cols: A dictionary mapping feature names (string) to embedding
column objects. This will be used in tf.feature_column.input_layer() to
create the input layer.
hidden_dims: int list of the dimensions of the hidden layers.
Returns:
A CFModel object.
"""
def create_network(features):
"""Maps input features dictionary to user embeddings.
Args:
features: A dictionary of input string tensors.
Returns:
outputs: A tensor of shape [batch_size, embedding_dim].
"""
# Create a bag-of-words embedding for each sparse feature.
inputs = tf.feature_column.input_layer(features, embedding_cols)
# Hidden layers.
input_dim = inputs.shape[1].value
for i, output_dim in enumerate(hidden_dims):
w = tf.get_variable(
"hidden%d_w_" % i, shape=[input_dim, output_dim],
initializer=tf.truncated_normal_initializer(
stddev=1./np.sqrt(output_dim))) / 10.
outputs = tf.matmul(inputs, w)
input_dim = output_dim
inputs = outputs
return outputs
train_rated_movies, test_rated_movies = split_dataframe(rated_movies)
train_batch = make_batch(train_rated_movies, 200)
test_batch = make_batch(test_rated_movies, 100)
with tf.variable_scope("model", reuse=False):
# Train
train_user_embeddings = create_network(train_batch)
train_labels = select_random(train_batch["label"])
with tf.variable_scope("model", reuse=True):
# Test
test_user_embeddings = create_network(test_batch)
test_labels = select_random(test_batch["label"])
movie_embeddings = tf.get_variable(
"input_layer/movie_id_embedding/embedding_weights")
test_loss = softmax_loss(
test_user_embeddings, movie_embeddings, test_labels)
train_loss = softmax_loss(
train_user_embeddings, movie_embeddings, train_labels)
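  # Added comment: each test example has a single relevant label (the randomly
  # held-out movie), so precision@10 is 1/10 when that movie appears among the
  # 10 highest-scoring movies and 0 otherwise, averaged over the batch.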
_, test_precision_at_10 = tf.metrics.precision_at_k(
labels=test_labels,
predictions=tf.matmul(test_user_embeddings, movie_embeddings, transpose_b=True),
k=10)
metrics = (
{"train_loss": train_loss, "test_loss": test_loss},
{"test_precision_at_10": test_precision_at_10}
)
embeddings = {"movie_id": movie_embeddings}
return CFModel(embeddings, train_loss, metrics)
###Output
_____no_output_____
###Markdown
Train the Softmax modelWe are now ready to train the softmax model. You can set the following hyperparameters:- learning rate- number of iterations. Note: you can run `softmax_model.train()` again to continue training the model from its current state.- input embedding dimensions (set when creating the embedding columns with `make_embedding_col` below)- number of hidden layers and size of each layer (the `hidden_dims` argument). Note: since our input features are string-valued (movie_id, genre, and year), we need to map them to integer ids. This is done using [`tf.feature_column.categorical_column_with_vocabulary_list`](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_list), which takes a vocabulary list specifying all the values the feature can take. Then each id is mapped to an embedding vector using [`tf.feature_column.embedding_column`](https://www.tensorflow.org/api_docs/python/tf/feature_column/embedding_column).
###Code
# Create feature embedding columns
def make_embedding_col(key, embedding_dim):
categorical_col = tf.feature_column.categorical_column_with_vocabulary_list(
key=key, vocabulary_list=list(set(movies[key].values)), num_oov_buckets=0)
return tf.feature_column.embedding_column(
categorical_column=categorical_col, dimension=embedding_dim,
      # default initializer: truncated normal with stddev=1/sqrt(dimension)
combiner='mean')
with tf.Graph().as_default():
softmax_model = build_softmax_model(
rated_movies,
embedding_cols=[
make_embedding_col("movie_id", 35),
make_embedding_col("genre", 3),
make_embedding_col("year", 2),
],
hidden_dims=[35])
softmax_model.train(
learning_rate=8., num_iterations=3000, optimizer=tf.train.AdagradOptimizer)
###Output
_____no_output_____
###Markdown
Inspect the embeddingsWe can inspect the movie embeddings as we did for the previous models. Note that in this case, the movie embeddings are used at the same time as input embeddings (for the bag of words representation of the user history), and as softmax weights.
###Code
movie_neighbors(softmax_model, "Aladdin", DOT)
movie_neighbors(softmax_model, "Aladdin", COSINE)
movie_embedding_norm([reg_model, softmax_model])
tsne_movie_embeddings(softmax_model)
###Output
_____no_output_____
###Markdown
Copyright 2018 Google LLC.
###Code
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Recommendation Systems with TensorFlowThis Colab notebook complements the course on [Recommendation Systems](https://developers.google.com/machine-learning/recommendation/). Specifically, we'll be using matrix factorization to learn user and movie embeddings. IntroductionWe will create a movie recommendation system based on the [MovieLens](https://movielens.org/) dataset available [here](http://grouplens.org/datasets/movielens/). The data consists of movie ratings (on a scale of 1 to 5). Outline 1. Exploring the MovieLens Data (10 minutes) 1. Preliminaries (25 minutes) 1. Training a matrix factorization model (15 minutes) 1. Inspecting the Embeddings (15 minutes) 1. Regularization in matrix factorization (15 minutes) 1. Softmax model training (30 minutes) SetupLet's get started by importing the required packages.
###Code
# @title Imports (run this cell)
from __future__ import print_function
import numpy as np
import pandas as pd
import collections
from mpl_toolkits.mplot3d import Axes3D
from IPython import display
from matplotlib import pyplot as plt
import sklearn
import sklearn.manifold
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
# Add some convenience functions to Pandas DataFrame.
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.3f}'.format
def mask(df, key, function):
"""Returns a filtered dataframe, by applying function to key"""
return df[function(df[key])]
def flatten_cols(df):
df.columns = [' '.join(col).strip() for col in df.columns.values]
return df
pd.DataFrame.mask = mask
pd.DataFrame.flatten_cols = flatten_cols
# Install Altair and activate its colab renderer.
print("Installing Altair...")
!pip install git+git://github.com/altair-viz/altair.git
import altair as alt
alt.data_transformers.enable('default', max_rows=None)
alt.renderers.enable('colab')
print("Done installing Altair.")
# Install spreadsheets and import authentication module.
USER_RATINGS = False
!pip install --upgrade -q gspread
from google.colab import auth
import gspread
from oauth2client.client import GoogleCredentials
###Output
_____no_output_____
###Markdown
We then download the MovieLens Data, and create DataFrames containing movies, users, and ratings.
###Code
# @title Load the MovieLens data (run this cell).
# Download MovieLens data.
print("Downloading movielens data...")
from urllib.request import urlretrieve
import zipfile
urlretrieve("http://files.grouplens.org/datasets/movielens/ml-100k.zip", "movielens.zip")
zip_ref = zipfile.ZipFile('movielens.zip', "r")
zip_ref.extractall()
print("Done. Dataset contains:")
print(zip_ref.read('ml-100k/u.info'))
# Load each data set (users, movies, and ratings).
users_cols = ['user_id', 'age', 'sex', 'occupation', 'zip_code']
users = pd.read_csv(
'ml-100k/u.user', sep='|', names=users_cols, encoding='latin-1')
ratings_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp']
ratings = pd.read_csv(
'ml-100k/u.data', sep='\t', names=ratings_cols, encoding='latin-1')
# The movies file contains a binary feature for each genre.
genre_cols = [
"genre_unknown", "Action", "Adventure", "Animation", "Children", "Comedy",
"Crime", "Documentary", "Drama", "Fantasy", "Film-Noir", "Horror",
"Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western"
]
movies_cols = [
'movie_id', 'title', 'release_date', "video_release_date", "imdb_url"
] + genre_cols
movies = pd.read_csv(
'ml-100k/u.item', sep='|', names=movies_cols, encoding='latin-1')
# Since the ids start at 1, we shift them to start at 0.
users["user_id"] = users["user_id"].apply(lambda x: str(x-1))
movies["movie_id"] = movies["movie_id"].apply(lambda x: str(x-1))
movies["year"] = movies['release_date'].apply(lambda x: str(x).split('-')[-1])
ratings["movie_id"] = ratings["movie_id"].apply(lambda x: str(x-1))
ratings["user_id"] = ratings["user_id"].apply(lambda x: str(x-1))
ratings["rating"] = ratings["rating"].apply(lambda x: float(x))
# Compute the number of movies to which a genre is assigned.
genre_occurences = movies[genre_cols].sum().to_dict()
# Since some movies can belong to more than one genre, we create different
# 'genre' columns as follows:
# - all_genres: all the active genres of the movie.
# - genre: randomly sampled from the active genres.
def mark_genres(movies, genres):
def get_random_genre(gs):
active = [genre for genre, g in zip(genres, gs) if g==1]
if len(active) == 0:
return 'Other'
return np.random.choice(active)
def get_all_genres(gs):
active = [genre for genre, g in zip(genres, gs) if g==1]
if len(active) == 0:
return 'Other'
return '-'.join(active)
movies['genre'] = [
get_random_genre(gs) for gs in zip(*[movies[genre] for genre in genres])]
movies['all_genres'] = [
get_all_genres(gs) for gs in zip(*[movies[genre] for genre in genres])]
mark_genres(movies, genre_cols)
# Create one merged DataFrame containing all the movielens data.
movielens = ratings.merge(movies, on='movie_id').merge(users, on='user_id')
# Utility to split the data into training and test sets.
def split_dataframe(df, holdout_fraction=0.1):
"""Splits a DataFrame into training and test sets.
Args:
df: a dataframe.
holdout_fraction: fraction of dataframe rows to use in the test set.
Returns:
train: dataframe for training
test: dataframe for testing
"""
test = df.sample(frac=holdout_fraction, replace=False)
train = df[~df.index.isin(test.index)]
return train, test
###Output
_____no_output_____
###Markdown
I. Exploring the Movielens DataBefore we dive into model building, let's inspect our MovieLens dataset. It is usually helpful to understand the statistics of the dataset. UsersWe start by printing some basic statistics describing the numeric user features.
###Code
users.describe()
###Output
_____no_output_____
###Markdown
We can also print some basic statistics describing the categorical user features
###Code
users.describe(include=[np.object])
###Output
_____no_output_____
###Markdown
We can also create histograms to further understand the distribution of the users. We use Altair to create an interactive chart.
###Code
# @title Altair visualization code (run this cell)
# The following functions are used to generate interactive Altair charts.
# We will display histograms of the data, sliced by a given attribute.
# Create filters to be used to slice the data.
occupation_filter = alt.selection_multi(fields=["occupation"])
occupation_chart = alt.Chart().mark_bar().encode(
x="count()",
y=alt.Y("occupation:N"),
color=alt.condition(
occupation_filter,
alt.Color("occupation:N", scale=alt.Scale(scheme='category20')),
alt.value("lightgray")),
).properties(width=300, height=300, selection=occupation_filter)
# A function that generates a histogram of filtered data.
def filtered_hist(field, label, filter):
"""Creates a layered chart of histograms.
The first layer (light gray) contains the histogram of the full data, and the
second contains the histogram of the filtered data.
Args:
field: the field for which to generate the histogram.
label: String label of the histogram.
filter: an alt.Selection object to be used to filter the data.
"""
base = alt.Chart().mark_bar().encode(
x=alt.X(field, bin=alt.Bin(maxbins=10), title=label),
y="count()",
).properties(
width=300,
)
return alt.layer(
base.transform_filter(filter),
base.encode(color=alt.value('lightgray'), opacity=alt.value(.7)),
).resolve_scale(y='independent')
###Output
_____no_output_____
###Markdown
Next, we look at the distribution of ratings per user. Clicking on an occupation in the right chart will filter the data by that occupation. The corresponding histogram is shown in blue, and superimposed with the histogram for the whole data (in light gray). You can use SHIFT+click to select multiple subsets.What do you observe, and how might this affect the recommendations?
###Code
users_ratings = (
ratings
.groupby('user_id', as_index=False)
.agg({'rating': ['count', 'mean']})
.flatten_cols()
.merge(users, on='user_id')
)
# Create a chart for the count, and one for the mean.
alt.hconcat(
filtered_hist('rating count', '# ratings / user', occupation_filter),
filtered_hist('rating mean', 'mean user rating', occupation_filter),
occupation_chart,
data=users_ratings)
###Output
_____no_output_____
###Markdown
MoviesIt is also useful to look at information about the movies and their ratings.
###Code
movies_ratings = movies.merge(
ratings
.groupby('movie_id', as_index=False)
.agg({'rating': ['count', 'mean']})
.flatten_cols(),
on='movie_id')
genre_filter = alt.selection_multi(fields=['genre'])
genre_chart = alt.Chart().mark_bar().encode(
x="count()",
y=alt.Y('genre'),
color=alt.condition(
genre_filter,
alt.Color("genre:N"),
alt.value('lightgray'))
).properties(height=300, selection=genre_filter)
(movies_ratings[['title', 'rating count', 'rating mean']]
.sort_values('rating count', ascending=False)
.head(10))
(movies_ratings[['title', 'rating count', 'rating mean']]
.mask('rating count', lambda x: x > 20)
.sort_values('rating mean', ascending=False)
.head(10))
###Output
_____no_output_____
###Markdown
Finally, the last chart shows the distribution of the number of ratings and average rating.
###Code
# Display the number of ratings and average rating per movie.
alt.hconcat(
filtered_hist('rating count', '# ratings / movie', genre_filter),
filtered_hist('rating mean', 'mean movie rating', genre_filter),
genre_chart,
data=movies_ratings)
###Output
_____no_output_____
###Markdown
II. PreliminariesOur goal is to factorize the ratings matrix $A$ into the product of a user embedding matrix $U$ and movie embedding matrix $V$, such that $A \approx UV^\top$ with$U = \begin{bmatrix} u_{1} \\ \hline \vdots \\ \hline u_{N} \end{bmatrix}$ and$V = \begin{bmatrix} v_{1} \\ \hline \vdots \\ \hline v_{M} \end{bmatrix}$.Here- $N$ is the number of users,- $M$ is the number of movies,- $A_{ij}$ is the rating of the $j$th movie by the $i$th user,- each row $U_i$ is a $d$-dimensional vector (embedding) representing user $i$,- each row $V_j$ is a $d$-dimensional vector (embedding) representing movie $j$,- the prediction of the model for the $(i, j)$ pair is the dot product $\langle U_i, V_j \rangle$. Sparse Representation of the Rating MatrixThe rating matrix could be very large and, in general, most of the entries are unobserved, since a given user will only rate a small subset of movies. For efficient representation, we will use a [tf.SparseTensor](https://www.tensorflow.org/api_docs/python/tf/SparseTensor). A `SparseTensor` uses three tensors to represent the matrix: `tf.SparseTensor(indices, values, dense_shape)` represents a tensor, where a value $A_{ij} = a$ is encoded by setting `indices[k] = [i, j]` and `values[k] = a`. The last tensor `dense_shape` is used to specify the shape of the full underlying matrix. Toy exampleAssume we have $2$ users and $4$ movies. Our toy ratings dataframe has three ratings:

user\_id | movie\_id | rating
--:|--:|--:
0 | 0 | 5.0
0 | 1 | 3.0
1 | 3 | 1.0

The corresponding rating matrix is$$A =\begin{bmatrix}5.0 & 3.0 & 0 & 0 \\0 & 0 & 0 & 1.0\end{bmatrix}$$And the SparseTensor representation is,
```python
SparseTensor(
  indices=[[0, 0], [0, 1], [1, 3]],
  values=[5.0, 3.0, 1.0],
  dense_shape=[2, 4])
```
Exercise 1: Build a tf.SparseTensor representation of the Rating Matrix.In this exercise, we'll write a function that maps from our `ratings` DataFrame to a `tf.SparseTensor`.Hint: you can select the values of a given column of a Dataframe `df` using `df['column_name'].values`.
###Code
def build_rating_sparse_tensor(ratings_df):
"""
Args:
ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns.
Returns:
A tf.SparseTensor representing the ratings matrix.
"""
# ========================= Complete this section ============================
# indices =
# values =
# ============================================================================
return tf.SparseTensor(
indices=indices,
values=values,
dense_shape=[users.shape[0], movies.shape[0]])
#@title Solution
def build_rating_sparse_tensor(ratings_df):
"""
Args:
ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns.
Returns:
a tf.SparseTensor representing the ratings matrix.
"""
indices = ratings_df[['user_id', 'movie_id']].values
values = ratings_df['rating'].values
return tf.SparseTensor(
indices=indices,
values=values,
dense_shape=[users.shape[0], movies.shape[0]])
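# Added check (not part of the original exercise): build the SparseTensor for the
# 2-user / 4-movie toy example from the text directly, bypassing the global
# `users` and `movies` DataFrames, and print its dense form.
toy_ratings = pd.DataFrame({
    'user_id': [0, 0, 1], 'movie_id': [0, 1, 3], 'rating': [5.0, 3.0, 1.0]})
toy_sparse = tf.SparseTensor(
    indices=toy_ratings[['user_id', 'movie_id']].values,
    values=toy_ratings['rating'].values,
    dense_shape=[2, 4])
with tf.Session() as toy_sess:
  print(toy_sess.run(tf.sparse_tensor_to_dense(toy_sparse)))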
###Output
_____no_output_____
###Markdown
Calculating the errorThe model approximates the ratings matrix $A$ by a low-rank product $UV^\top$. We need a way to measure the approximation error. We'll start by using the Mean Squared Error of observed entries only (we will revisit this later). It is defined as$$\begin{align*}\text{MSE}(A, UV^\top)&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - (UV^\top)_{ij})^2} \\&= \frac{1}{|\Omega|}\sum_{(i, j) \in\Omega}{( A_{ij} - \langle U_i, V_j\rangle)^2}\end{align*}$$where $\Omega$ is the set of observed ratings, and $|\Omega|$ is the cardinality of $\Omega$. Exercise 2: Mean Squared ErrorWrite a TensorFlow function that takes a sparse rating matrix $A$ and the two embedding matrices $U, V$ and returns the mean squared error $\text{MSE}(A, UV^\top)$.Hints: * in this section, we only consider observed entries when calculating the loss. * a `SparseTensor` `sp_x` is a tuple of three Tensors: `sp_x.indices`, `sp_x.values` and `sp_x.dense_shape`. * you may find [`tf.gather_nd`](https://www.tensorflow.org/api_docs/python/tf/gather_nd) and [`tf.losses.mean_squared_error`](https://www.tensorflow.org/api_docs/python/tf/losses/mean_squared_error) helpful.
###Code
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
# ========================= Complete this section ============================
# loss =
# ============================================================================
return loss
#@title Solution
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
predictions = tf.gather_nd(
tf.matmul(user_embeddings, movie_embeddings, transpose_b=True),
sparse_ratings.indices)
loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions)
return loss
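# Added NumPy cross-check (not part of the original exercise): the same quantity
# computed by hand for the three observed entries of the toy example in Section II,
# against hypothetical (made-up) predictions. Only observed entries contribute.
_observed = np.array([5.0, 3.0, 1.0])
_predicted = np.array([4.0, 3.5, 0.0])
print("toy MSE over observed entries: %.3f" % np.mean((_observed - _predicted) ** 2))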
###Output
_____no_output_____
###Markdown
Note: One approach is to compute the full prediction matrix $UV^\top$, then gather the entries corresponding to the observed pairs. The memory cost of this approach is $O(NM)$. For the MovieLens dataset, this is fine, as the dense $N \times M$ matrix is small enough to fit in memory ($N = 943$, $M = 1682$).Another approach (given in the alternate solution below) is to only gather the embeddings of the observed pairs, then compute their dot products. The memory cost is $O(|\Omega| d)$ where $d$ is the embedding dimension. In our case, $|\Omega| = 10^5$, and the embedding dimension is on the order of $10$, so the memory cost of both methods is comparable. But when the number of users or movies is much larger, the first approach becomes infeasible.
###Code
#@title Alternate Solution
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
predictions = tf.reduce_sum(
tf.gather(user_embeddings, sparse_ratings.indices[:, 0]) *
tf.gather(movie_embeddings, sparse_ratings.indices[:, 1]),
axis=1)
loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions)
return loss
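# Added back-of-the-envelope arithmetic using the figures quoted above: the dense
# approach stores N*M predictions, while the gather-then-dot approach stores
# |Omega|*d gathered embedding entries.
_N, _M, _num_ratings, _d = 943, 1682, 100000, 10
print("dense UV^T entries:        %d" % (_N * _M))            # ~1.6 million
print("gathered embedding floats: %d" % (_num_ratings * _d))  # ~1.0 million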
###Output
_____no_output_____
###Markdown
Exercise 3 (Optional): adding your own ratings to the data set You have the option to add your own ratings to the data set. If you choose to do so, you will be able to see recommendations for yourself. Start by checking the box below. Running the next cell will authenticate you to your Google Drive account and create a spreadsheet that contains all movie titles in column 'A'. Follow the link to the spreadsheet and take 3 minutes to rate some of the movies. Your ratings should be entered in column 'B'.
###Code
USER_RATINGS = True #@param {type:"boolean"}
# @title Run to create a spreadsheet, then use it to enter your ratings.
# Authenticate user.
if USER_RATINGS:
auth.authenticate_user()
gc = gspread.authorize(GoogleCredentials.get_application_default())
# Create the spreadsheet and print a link to it.
try:
sh = gc.open('MovieLens-test')
except(gspread.SpreadsheetNotFound):
sh = gc.create('MovieLens-test')
worksheet = sh.sheet1
titles = movies['title'].values
cell_list = worksheet.range(1, 1, len(titles), 1)
for cell, title in zip(cell_list, titles):
cell.value = title
worksheet.update_cells(cell_list)
print("Link to the spreadsheet: "
"https://docs.google.com/spreadsheets/d/{}/edit".format(sh.id))
###Output
_____no_output_____
###Markdown
Run the next cell to load your ratings and add them to the main `ratings` DataFrame.
###Code
# @title Run to load your ratings.
# Load the ratings from the spreadsheet and create a DataFrame.
if USER_RATINGS:
my_ratings = pd.DataFrame.from_records(worksheet.get_all_values()).reset_index()
my_ratings = my_ratings[my_ratings[1] != '']
my_ratings = pd.DataFrame({
'user_id': "943",
'movie_id': list(map(str, my_ratings['index'])),
'rating': list(map(float, my_ratings[1])),
})
# Remove previous ratings.
ratings = ratings[ratings.user_id != "943"]
# Add new ratings.
ratings = ratings.append(my_ratings, ignore_index=True)
# Add new user to the users DataFrame.
if users.shape[0] == 943:
users = users.append(users.iloc[942], ignore_index=True)
users["user_id"][943] = "943"
print("Added your %d ratings; you have great taste!" % len(my_ratings))
ratings[ratings.user_id=="943"].merge(movies[['movie_id', 'title']])
###Output
_____no_output_____
###Markdown
III. Training a Matrix Factorization model CFModel (Collaborative Filtering Model) helper classThis is a simple class to train a matrix factorization model using stochastic gradient descent.The class constructor takes- a dictionary of embedding variables (each a `tf.Variable`), e.g. the user embeddings U and the movie embeddings V,- a loss to optimize (a `tf.Tensor`),- an optional list of metrics dictionaries, each mapping a string (the name of the metric) to a tensor. These are evaluated and plotted during training (e.g. training error and test error).After training, one can access the trained embeddings using the `model.embeddings` dictionary.Example usage:
```
U_var = ...
V_var = ...
loss = ...
model = CFModel({'user_id': U_var, 'movie_id': V_var}, loss)
model.train(num_iterations=100, learning_rate=1.0)
user_embeddings = model.embeddings['user_id']
movie_embeddings = model.embeddings['movie_id']
```
###Code
# @title CFModel helper class (run this cell)
class CFModel(object):
"""Simple class that represents a collaborative filtering model"""
def __init__(self, embedding_vars, loss, metrics=None):
"""Initializes a CFModel.
Args:
embedding_vars: A dictionary of tf.Variables.
loss: A float Tensor. The loss to optimize.
metrics: optional list of dictionaries of Tensors. The metrics in each
dictionary will be plotted in a separate figure during training.
"""
self._embedding_vars = embedding_vars
self._loss = loss
self._metrics = metrics
self._embeddings = {k: None for k in embedding_vars}
self._session = None
@property
def embeddings(self):
"""The embeddings dictionary."""
return self._embeddings
def train(self, num_iterations=100, learning_rate=1.0, plot_results=True,
optimizer=tf.train.GradientDescentOptimizer):
"""Trains the model.
Args:
      num_iterations: number of iterations to run.
learning_rate: optimizer learning rate.
plot_results: whether to plot the results at the end of training.
      optimizer: the optimizer to use. Defaults to GradientDescentOptimizer.
Returns:
The metrics dictionary evaluated at the last iteration.
"""
with self._loss.graph.as_default():
opt = optimizer(learning_rate)
train_op = opt.minimize(self._loss)
local_init_op = tf.group(
tf.variables_initializer(opt.variables()),
tf.local_variables_initializer())
if self._session is None:
self._session = tf.Session()
with self._session.as_default():
self._session.run(tf.global_variables_initializer())
self._session.run(tf.tables_initializer())
tf.train.start_queue_runners()
with self._session.as_default():
local_init_op.run()
iterations = []
metrics = self._metrics or ({},)
      metrics_vals = [collections.defaultdict(list) for _ in metrics]
# Train and append results.
for i in range(num_iterations + 1):
_, results = self._session.run((train_op, metrics))
if (i % 10 == 0) or i == num_iterations:
print("\r iteration %d: " % i + ", ".join(
["%s=%f" % (k, v) for r in results for k, v in r.items()]),
end='')
iterations.append(i)
for metric_val, result in zip(metrics_vals, results):
for k, v in result.items():
metric_val[k].append(v)
for k, v in self._embedding_vars.items():
self._embeddings[k] = v.eval()
if plot_results:
# Plot the metrics.
num_subplots = len(metrics)+1
fig = plt.figure()
fig.set_size_inches(num_subplots*10, 8)
for i, metric_vals in enumerate(metrics_vals):
ax = fig.add_subplot(1, num_subplots, i+1)
for k, v in metric_vals.items():
ax.plot(iterations, v, label=k)
ax.set_xlim([1, num_iterations])
ax.legend()
return results
###Output
_____no_output_____
###Markdown
Exercise 4: Build a Matrix Factorization model and train itUsing your `sparse_mean_square_error` function, write a function that builds a `CFModel` by creating the embedding variables and the train and test losses.
###Code
def build_model(ratings, embedding_dim=3, init_stddev=1.):
"""
Args:
ratings: a DataFrame of the ratings
embedding_dim: the dimension of the embedding vectors.
init_stddev: float, the standard deviation of the random initial embeddings.
Returns:
model: a CFModel.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
# ========================= Complete this section ============================
# A_train =
# A_test =
# ============================================================================
# Initialize the embeddings using a normal distribution.
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
# ========================= Complete this section ============================
# train_loss =
# test_loss =
# ============================================================================
metrics = {
'train_error': train_loss,
'test_error': test_loss
}
embeddings = {
"user_id": U,
"movie_id": V
}
return CFModel(embeddings, train_loss, [metrics])
#@title Solution
def build_model(ratings, embedding_dim=3, init_stddev=1.):
"""
Args:
ratings: a DataFrame of the ratings
embedding_dim: the dimension of the embedding vectors.
init_stddev: float, the standard deviation of the random initial embeddings.
Returns:
model: a CFModel.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
A_train = build_rating_sparse_tensor(train_ratings)
A_test = build_rating_sparse_tensor(test_ratings)
# Initialize the embeddings using a normal distribution.
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
train_loss = sparse_mean_square_error(A_train, U, V)
test_loss = sparse_mean_square_error(A_test, U, V)
metrics = {
'train_error': train_loss,
'test_error': test_loss
}
embeddings = {
"user_id": U,
"movie_id": V
}
return CFModel(embeddings, train_loss, [metrics])
###Output
_____no_output_____
###Markdown
Great, now it's time to train the model!Go ahead and run the next cell, trying different parameters (embedding dimension, learning rate, iterations). The training and test errors are plotted at the end of training. You can inspect these values to validate the hyper-parameters.Note: by calling `model.train` again, the model will continue training starting from the current values of the embeddings.
###Code
# Build the CF model and train it.
model = build_model(ratings, embedding_dim=30, init_stddev=0.5)
model.train(num_iterations=1000, learning_rate=10.)
###Output
_____no_output_____
###Markdown
The movie and user embeddings are also displayed in the right figure. When the embedding dimension is greater than 3, the embeddings are projected on the first 3 dimensions. The next section will have a more detailed look at the embeddings. IV. Inspecting the EmbeddingsIn this section, we take a closer look at the learned embeddings, by- computing your recommendations- looking at the nearest neighbors of some movies,- looking at the norms of the movie embeddings,- visualizing the embedding in a projected embedding space. Exercise 5: Write a function that computes the scores of the candidatesWe start by writing a function that, given a query embedding $u \in \mathbb R^d$ and item embeddings $V \in \mathbb R^{N \times d}$, computes the item scores.As discussed in the lecture, there are different similarity measures we can use, and these can yield different results. We will compare the following:- dot product: the score of item j is $\langle u, V_j \rangle$.- cosine: the score of item j is $\frac{\langle u, V_j \rangle}{\|u\|\|V_j\|}$.Hints:- you can use [`np.dot`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html) to compute the product of two np.Arrays.- you can use [`np.linalg.norm`](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.norm.html) to compute the norm of a np.Array.
###Code
DOT = 'dot'
COSINE = 'cosine'
def compute_scores(query_embedding, item_embeddings, measure=DOT):
"""Computes the scores of the candidates given a query.
Args:
query_embedding: a vector of shape [k], representing the query embedding.
item_embeddings: a matrix of shape [N, k], such that row i is the embedding
of item i.
measure: a string specifying the similarity measure to be used. Can be
either DOT or COSINE.
Returns:
scores: a vector of shape [N], such that scores[i] is the score of item i.
"""
# ========================= Complete this section ============================
# scores =
# ============================================================================
return scores
#@title Solution
DOT = 'dot'
COSINE = 'cosine'
def compute_scores(query_embedding, item_embeddings, measure=DOT):
"""Computes the scores of the candidates given a query.
Args:
query_embedding: a vector of shape [k], representing the query embedding.
item_embeddings: a matrix of shape [N, k], such that row i is the embedding
of item i.
measure: a string specifying the similarity measure to be used. Can be
either DOT or COSINE.
Returns:
scores: a vector of shape [N], such that scores[i] is the score of item i.
"""
u = query_embedding
V = item_embeddings
if measure == COSINE:
V = V / np.linalg.norm(V, axis=1, keepdims=True)
u = u / np.linalg.norm(u)
scores = u.dot(V.T)
return scores
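# Added toy illustration (the vectors are made up): with the dot product, the
# larger-norm item scores higher even though it is less aligned with the query;
# with cosine, only the direction matters. This mirrors the popularity effect
# discussed in the Movie Embedding Norm section below.
_query = np.array([1.0, 0.0])
_items = np.array([[3.0, 3.0],    # large norm, 45 degrees from the query
                   [1.0, 0.1]])   # small norm, nearly aligned with the query
print("dot scores:    ", compute_scores(_query, _items, DOT))
print("cosine scores: ", compute_scores(_query, _items, COSINE))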
###Output
_____no_output_____
###Markdown
Equipped with this function, we can compute recommendations, where the query embedding can be either a user embedding or a movie embedding.
###Code
# @title User recommendations and nearest neighbors (run this cell)
def user_recommendations(model, measure=DOT, exclude_rated=False, k=6):
if USER_RATINGS:
scores = compute_scores(
model.embeddings["user_id"][943], model.embeddings["movie_id"], measure)
score_key = measure + ' score'
df = pd.DataFrame({
score_key: list(scores),
'movie_id': movies['movie_id'],
'titles': movies['title'],
'genres': movies['all_genres'],
})
if exclude_rated:
# remove movies that are already rated
rated_movies = ratings[ratings.user_id == "943"]["movie_id"].values
df = df[df.movie_id.apply(lambda movie_id: movie_id not in rated_movies)]
display.display(df.sort_values([score_key], ascending=False).head(k))
def movie_neighbors(model, title_substring, measure=DOT, k=6):
# Search for movie ids that match the given substring.
ids = movies[movies['title'].str.contains(title_substring)].index.values
titles = movies.iloc[ids]['title'].values
if len(titles) == 0:
raise ValueError("Found no movies with title %s" % title_substring)
print("Nearest neighbors of : %s." % titles[0])
if len(titles) > 1:
print("[Found more than one matching movie. Other candidates: {}]".format(
", ".join(titles[1:])))
movie_id = ids[0]
scores = compute_scores(
model.embeddings["movie_id"][movie_id], model.embeddings["movie_id"],
measure)
score_key = measure + ' score'
df = pd.DataFrame({
score_key: list(scores),
'titles': movies['title'],
'genres': movies['all_genres']
})
display.display(df.sort_values([score_key], ascending=False).head(k))
###Output
_____no_output_____
###Markdown
Your recommendationsIf you chose to enter your own ratings, you can run the next cell to generate recommendations for you.
###Code
user_recommendations(model, measure=COSINE, k=5)
###Output
_____no_output_____
###Markdown
How do the recommendations look? Movie Nearest neighborsLet's look at the nearest neighbors for some of the movies.
###Code
movie_neighbors(model, "Aladdin", DOT)
movie_neighbors(model, "Aladdin", COSINE)
###Output
_____no_output_____
###Markdown
It seems that the quality of learned embeddings may not be very good. This will be addressed in Section V by adding several regularization techniques. First, we will further inspect the embeddings. Movie Embedding NormWe can also observe that the recommendations with dot-product and cosine are different: with dot-product, the model tends to recommend popular movies. This can be explained by the fact that in matrix factorization models, the norm of the embedding is often correlated with popularity (popular movies have a larger norm), which makes it more likely to recommend more popular items. We can confirm this hypothesis by sorting the movies by their embedding norm, as done in the next cell.
###Code
# @title Embedding Visualization code (run this cell)
def movie_embedding_norm(models):
"""Visualizes the norm and number of ratings of the movie embeddings.
Args:
    models: A CFModel object, or a list of CFModel objects.
"""
if not isinstance(models, list):
models = [models]
df = pd.DataFrame({
'title': movies['title'],
'genre': movies['genre'],
'num_ratings': movies_ratings['rating count'],
})
charts = []
brush = alt.selection_interval()
for i, model in enumerate(models):
norm_key = 'norm'+str(i)
df[norm_key] = np.linalg.norm(model.embeddings["movie_id"], axis=1)
nearest = alt.selection(
type='single', encodings=['x', 'y'], on='mouseover', nearest=True,
empty='none')
base = alt.Chart().mark_circle().encode(
x='num_ratings',
y=norm_key,
color=alt.condition(brush, alt.value('#4c78a8'), alt.value('lightgray'))
).properties(
selection=nearest).add_selection(brush)
text = alt.Chart().mark_text(align='center', dx=5, dy=-5).encode(
x='num_ratings', y=norm_key,
text=alt.condition(nearest, 'title', alt.value('')))
charts.append(alt.layer(base, text))
return alt.hconcat(*charts, data=df)
def visualize_movie_embeddings(data, x, y):
nearest = alt.selection(
type='single', encodings=['x', 'y'], on='mouseover', nearest=True,
empty='none')
base = alt.Chart().mark_circle().encode(
x=x,
y=y,
color=alt.condition(genre_filter, "genre", alt.value("whitesmoke")),
).properties(
width=600,
height=600,
selection=nearest)
text = alt.Chart().mark_text(align='left', dx=5, dy=-5).encode(
x=x,
y=y,
text=alt.condition(nearest, 'title', alt.value('')))
return alt.hconcat(alt.layer(base, text), genre_chart, data=data)
def tsne_movie_embeddings(model):
"""Visualizes the movie embeddings, projected using t-SNE with Cosine measure.
Args:
    model: A CFModel object.
"""
tsne = sklearn.manifold.TSNE(
n_components=2, perplexity=40, metric='cosine', early_exaggeration=10.0,
init='pca', verbose=True, n_iter=400)
print('Running t-SNE...')
V_proj = tsne.fit_transform(model.embeddings["movie_id"])
movies.loc[:,'x'] = V_proj[:, 0]
movies.loc[:,'y'] = V_proj[:, 1]
return visualize_movie_embeddings(movies, 'x', 'y')
movie_embedding_norm(model)
###Output
_____no_output_____
###Markdown
Note: Depending on how the model is initialized, you may observe that some niche movies (ones with few ratings) have a high norm, leading to spurious recommendations. This can happen if the embedding of that movie happens to be initialized with a high norm. Then, because the movie has few ratings, it is infrequently updated, and can keep its high norm. This will be alleviated by using regularization. Try changing the value of the hyper-parameter `init_stddev`. One helpful fact: the expected norm of a $d$-dimensional vector with entries $\sim \mathcal N(0, \sigma^2)$ is approximately $\sigma \sqrt d$. How does this affect the embedding norm distribution, and the ranking of the top-norm movies?
###Code
#@title Solution
model_lowinit = build_model(ratings, embedding_dim=30, init_stddev=0.05)
model_lowinit.train(num_iterations=1000, learning_rate=10.)
movie_neighbors(model_lowinit, "Aladdin", DOT)
movie_neighbors(model_lowinit, "Aladdin", COSINE)
movie_embedding_norm([model, model_lowinit])
###Output
_____no_output_____
###Markdown
Embedding visualizationSince it is hard to visualize embeddings in a higher-dimensional space (when the embedding dimension $k > 3$), one approach is to project the embeddings to a lower-dimensional space. t-SNE (t-distributed Stochastic Neighbor Embedding) is an algorithm that projects the embeddings while attempting to preserve their pairwise distances. It can be useful for visualization, but one should use it with care. For more information on using t-SNE, see [How to Use t-SNE Effectively](https://distill.pub/2016/misread-tsne/).
###Code
tsne_movie_embeddings(model_lowinit)
###Output
_____no_output_____
###Markdown
You can highlight the embeddings of a given genre by clicking on the genres panel (SHIFT+click to select multiple genres).We can observe that the embeddings do not seem to have any notable structure, and the embeddings of a given genre are located all over the embedding space. This confirms the poor quality of the learned embeddings. One of the main reasons, which we will address in the next section, is that we only trained the model on observed pairs, and without regularization. V. Regularization In Matrix FactorizationIn the previous section, our loss was defined as the mean squared error on the observed part of the rating matrix. As discussed in the lecture, this can be problematic as the model does not learn how to place the embeddings of irrelevant movies. This phenomenon is known as *folding*.We will add regularization terms that will address this issue. We will use two types of regularization:- Regularization of the model parameters. This is a common $\ell_2$ regularization term on the embedding matrices, given by $r(U, V) = \frac{1}{N} \sum_i \|U_i\|^2 + \frac{1}{M}\sum_j \|V_j\|^2$.- A global prior that pushes the prediction of any pair towards zero, called the *gravity* term. This is given by $g(U, V) = \frac{1}{MN} \sum_{i = 1}^N \sum_{j = 1}^M \langle U_i, V_j \rangle^2$.The total loss is then given by$$\frac{1}{|\Omega|}\sum_{(i, j) \in \Omega} (A_{ij} - \langle U_i, V_j\rangle)^2 + \lambda _r r(U, V) + \lambda_g g(U, V)$$where $\lambda_r$ and $\lambda_g$ are two regularization coefficients (hyper-parameters). Exercise 6: Build a regularized Matrix Factorization model and train itWrite a function that builds a regularized model. You are given a function `gravity(U, V)` that computes the gravity term given the two embedding matrices $U$ and $V$.
###Code
def gravity(U, V):
"""Creates a gravity loss given two embedding matrices."""
return 1. / (U.shape[0].value*V.shape[0].value) * tf.reduce_sum(
tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True))
def build_regularized_model(
ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1.,
init_stddev=0.1):
"""
Args:
ratings: the DataFrame of movie ratings.
embedding_dim: The dimension of the embedding space.
regularization_coeff: The regularization coefficient lambda.
gravity_coeff: The gravity regularization coefficient lambda_g.
Returns:
A CFModel object that uses a regularized loss.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
A_train = build_rating_sparse_tensor(train_ratings)
A_test = build_rating_sparse_tensor(test_ratings)
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
# ========================= Complete this section ============================
# error_train =
# error_test =
# gravity_loss =
# regularization_loss =
# ============================================================================
total_loss = error_train + regularization_loss + gravity_loss
losses = {
'train_error': error_train,
'test_error': error_test,
}
loss_components = {
'observed_loss': error_train,
'regularization_loss': regularization_loss,
'gravity_loss': gravity_loss,
}
embeddings = {"user_id": U, "movie_id": V}
return CFModel(embeddings, total_loss, [losses, loss_components])
# @title Solution
def gravity(U, V):
"""Creates a gravity loss given two embedding matrices."""
return 1. / (U.shape[0].value*V.shape[0].value) * tf.reduce_sum(
tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True))
def build_regularized_model(
ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1.,
init_stddev=0.1):
"""
Args:
ratings: the DataFrame of movie ratings.
embedding_dim: The dimension of the embedding space.
regularization_coeff: The regularization coefficient lambda.
gravity_coeff: The gravity regularization coefficient lambda_g.
Returns:
A CFModel object that uses a regularized loss.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
A_train = build_rating_sparse_tensor(train_ratings)
A_test = build_rating_sparse_tensor(test_ratings)
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
error_train = sparse_mean_square_error(A_train, U, V)
error_test = sparse_mean_square_error(A_test, U, V)
gravity_loss = gravity_coeff * gravity(U, V)
regularization_loss = regularization_coeff * (
tf.reduce_sum(U*U)/U.shape[0].value + tf.reduce_sum(V*V)/V.shape[0].value)
total_loss = error_train + regularization_loss + gravity_loss
losses = {
'train_error_observed': error_train,
'test_error_observed': error_test,
}
loss_components = {
'observed_loss': error_train,
'regularization_loss': regularization_loss,
'gravity_loss': gravity_loss,
}
embeddings = {"user_id": U, "movie_id": V}
return CFModel(embeddings, total_loss, [losses, loss_components])
###Output
_____no_output_____
###Markdown
It is now time to train the regularized model! You can try different values of the regularization coefficients, and different embedding dimensions.
###Code
reg_model = build_regularized_model(
ratings, regularization_coeff=0.1, gravity_coeff=1.0, embedding_dim=35,
init_stddev=.05)
reg_model.train(num_iterations=2000, learning_rate=20.)
###Output
_____no_output_____
###Markdown
Observe that adding the regularization terms results in a higher MSE, both on the training and test set. However, as we will see, the quality of the recommendations improves. This highlights a tension between fitting the observed data and minimizing the regularization terms. Fitting the observed data often emphasizes learning high similarity (between items with many interactions), but a good embedding representation also requires learning low similarity (between items with few or no interactions). Inspect the results. Let's see if the results with regularization look better.
###Code
user_recommendations(reg_model, DOT, exclude_rated=True, k=10)
###Output
_____no_output_____
###Markdown
Hopefully, these recommendations look better. You can change the similarity measure from COSINE to DOT and observe how this affects the recommendations.Since the model is likely to recommend items that you rated highly, you have the option to exclude the items you rated, using `exclude_rated=True`.In the following cells, we display the nearest neighbors, the embedding norms, and the t-SNE projection of the movie embeddings.
###Code
movie_neighbors(reg_model, "Aladdin", DOT)
movie_neighbors(reg_model, "Aladdin", COSINE)
###Output
_____no_output_____
###Markdown
Here we compare the embedding norms for `model`, `model_lowinit`, and `reg_model`. Selecting a subset of the embeddings will highlight them on all charts simultaneously.
###Code
movie_embedding_norm([model, model_lowinit, reg_model])
# Visualize the embeddings
tsne_movie_embeddings(reg_model)
###Output
_____no_output_____
###Markdown
We should observe that the embeddings have a lot more structure than the unregularized case. Try selecting different genres and observe how they tend to form clusters (for example Horror, Animation and Children). ConclusionThis concludes this section on matrix factorization models. Note that while the scale of the problem is small enough to allow efficient training using SGD, many practical problems need to be trained using more specialized algorithms such as Alternating Least Squares (see [tf.contrib.factorization.WALSMatrixFactorization](https://www.tensorflow.org/api_docs/python/tf/contrib/factorization/WALSMatrixFactorization) for a TF implementation). VI. Softmax modelIn this section, we will train a simple softmax model that predicts whether a given user has rated a movie.**Note**: if you are taking the self-study version of the class, make sure to read through the part of the class covering Softmax training before working on this part.The model will take as input a feature vector $x$ representing the list of movies the user has rated. We start from the ratings DataFrame, which we group by user_id.
###Code
rated_movies = (ratings[["user_id", "movie_id"]]
.groupby("user_id", as_index=False)
.aggregate(lambda x: list(x)))
rated_movies.head()
###Output
_____no_output_____
###Markdown
We then create a function that generates an example batch, such that each example contains the following features:- movie_id: A tensor of strings of the movie ids that the user rated.- genre: A tensor of strings of the genres of those movies- year: A tensor of strings of the release year.
###Code
#@title Batch generation code (run this cell)
years_dict = {
movie: year for movie, year in zip(movies["movie_id"], movies["year"])
}
genres_dict = {
movie: genres.split('-')
for movie, genres in zip(movies["movie_id"], movies["all_genres"])
}
def make_batch(ratings, batch_size):
"""Creates a batch of examples.
Args:
ratings: A DataFrame of ratings such that examples["movie_id"] is a list of
movies rated by a user.
batch_size: The batch size.
"""
def pad(x, fill):
return pd.DataFrame.from_dict(x).fillna(fill).values
movie = []
year = []
genre = []
label = []
for movie_ids in ratings["movie_id"].values:
movie.append(movie_ids)
genre.append([x for movie_id in movie_ids for x in genres_dict[movie_id]])
year.append([years_dict[movie_id] for movie_id in movie_ids])
label.append([int(movie_id) for movie_id in movie_ids])
features = {
"movie_id": pad(movie, ""),
"year": pad(year, ""),
"genre": pad(genre, ""),
"label": pad(label, -1)
}
batch = (
tf.data.Dataset.from_tensor_slices(features)
.shuffle(1000)
.repeat()
.batch(batch_size)
.make_one_shot_iterator()
.get_next())
return batch
def select_random(x):
"""Selectes a random elements from each row of x."""
def to_float(x):
return tf.cast(x, tf.float32)
def to_int(x):
return tf.cast(x, tf.int64)
batch_size = tf.shape(x)[0]
rn = tf.range(batch_size)
nnz = to_float(tf.count_nonzero(x >= 0, axis=1))
rnd = tf.random_uniform([batch_size])
ids = tf.stack([to_int(rn), to_int(nnz * rnd)], axis=1)
return to_int(tf.gather_nd(x, ids))
###Output
_____no_output_____
###Markdown
Loss functionRecall that the softmax model maps the input features $x$ to a user embedding $\psi(x) \in \mathbb R^d$, where $d$ is the embedding dimension. This vector is then multiplied by a movie embedding matrix $V \in \mathbb R^{m \times d}$ (where $m$ is the number of movies), and the final output of the model is the softmax of the product$$\hat p(x) = \text{softmax}(\psi(x) V^\top).$$Given a target label $y$, if we denote by $p = 1_y$ a one-hot encoding of this target label, then the loss is the cross-entropy between $\hat p(x)$ and $p$. Exercise 7: Write a loss function for the softmax model.In this exercise, we will write a function that takes tensors representing the user embeddings $\psi(x)$, movie embeddings $V$, target label $y$, and return the cross-entropy loss.Hint: You can use the function [`tf.nn.sparse_softmax_cross_entropy_with_logits`](https://www.tensorflow.org/api_docs/python/tf/nn/sparse_softmax_cross_entropy_with_logits), which takes `logits` as input, where `logits` refers to the product $\psi(x) V^\top$.
###Code
def softmax_loss(user_embeddings, movie_embeddings, labels):
"""Returns the cross-entropy loss of the softmax model.
Args:
user_embeddings: A tensor of shape [batch_size, embedding_dim].
movie_embeddings: A tensor of shape [num_movies, embedding_dim].
labels: A sparse tensor of dense_shape [batch_size, 1], such that
labels[i] is the target label for example i.
Returns:
The mean cross-entropy loss.
"""
# ========================= Complete this section ============================
# logits =
# loss =
# ============================================================================
return loss
# @title Solution
def softmax_loss(user_embeddings, movie_embeddings, labels):
"""Returns the cross-entropy loss of the softmax model.
Args:
user_embeddings: A tensor of shape [batch_size, embedding_dim].
movie_embeddings: A tensor of shape [num_movies, embedding_dim].
labels: A tensor of [batch_size], such that labels[i] is the target label
for example i.
Returns:
The mean cross-entropy loss.
"""
  # Verify that the embeddings have compatible dimensions
user_emb_dim = user_embeddings.shape[1].value
movie_emb_dim = movie_embeddings.shape[1].value
if user_emb_dim != movie_emb_dim:
raise ValueError(
"The user embedding dimension %d should match the movie embedding "
"dimension % d" % (user_emb_dim, movie_emb_dim))
logits = tf.matmul(user_embeddings, movie_embeddings, transpose_b=True)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels))
return loss
###Output
_____no_output_____
###Markdown
Exercise 8: Build a softmax model, train it, and inspect its embeddings.We are now ready to build a softmax CFModel. Complete the `build_softmax_model` function in the next cell. The architecture of the model is defined in the function `create_user_embeddings` and illustrated in the figure below. The input embeddings (movie_id, genre and year) are concatenated to form the input layer, then we have hidden layers with dimensions specified by the `hidden_dims` argument. Finally, the last hidden layer is multiplied by the movie embeddings to obtain the logits layer. For the target label, we will use a randomly-sampled movie_id from the list of movies the user rated.Complete the function below by creating the feature columns and embedding columns, then creating the loss tensors both for the train and test sets (using the `softmax_loss` function of the previous exercise).
###Code
def build_softmax_model(rated_movies, embedding_cols, hidden_dims):
"""Builds a Softmax model for MovieLens.
Args:
    rated_movies: DataFrame of training examples.
embedding_cols: A dictionary mapping feature names (string) to embedding
column objects. This will be used in tf.feature_column.input_layer() to
create the input layer.
hidden_dims: int list of the dimensions of the hidden layers.
Returns:
A CFModel object.
"""
def create_network(features):
"""Maps input features dictionary to user embeddings.
Args:
features: A dictionary of input string tensors.
Returns:
outputs: A tensor of shape [batch_size, embedding_dim].
"""
# Create a bag-of-words embedding for each sparse feature.
inputs = tf.feature_column.input_layer(features, embedding_cols)
# Hidden layers.
input_dim = inputs.shape[1].value
for i, output_dim in enumerate(hidden_dims):
w = tf.get_variable(
"hidden%d_w_" % i, shape=[input_dim, output_dim],
initializer=tf.truncated_normal_initializer(
stddev=1./np.sqrt(output_dim))) / 10.
outputs = tf.matmul(inputs, w)
input_dim = output_dim
inputs = outputs
return outputs
train_rated_movies, test_rated_movies = split_dataframe(rated_movies)
train_batch = make_batch(train_rated_movies, 200)
test_batch = make_batch(test_rated_movies, 100)
with tf.variable_scope("model", reuse=False):
# Train
train_user_embeddings = create_network(train_batch)
train_labels = select_random(train_batch["label"])
with tf.variable_scope("model", reuse=True):
# Test
test_user_embeddings = create_network(test_batch)
test_labels = select_random(test_batch["label"])
movie_embeddings = tf.get_variable(
"input_layer/movie_id_embedding/embedding_weights")
# ========================= Complete this section ============================
# train_loss =
# test_loss =
# test_precision_at_10 =
# ============================================================================
metrics = (
{"train_loss": train_loss, "test_loss": test_loss},
{"test_precision_at_10": test_precision_at_10}
)
embeddings = {"movie_id": movie_embeddings}
return CFModel(embeddings, train_loss, metrics)
# @title Solution
def build_softmax_model(rated_movies, embedding_cols, hidden_dims):
"""Builds a Softmax model for MovieLens.
Args:
    rated_movies: DataFrame of training examples.
embedding_cols: A dictionary mapping feature names (string) to embedding
column objects. This will be used in tf.feature_column.input_layer() to
create the input layer.
hidden_dims: int list of the dimensions of the hidden layers.
Returns:
A CFModel object.
"""
def create_network(features):
"""Maps input features dictionary to user embeddings.
Args:
features: A dictionary of input string tensors.
Returns:
outputs: A tensor of shape [batch_size, embedding_dim].
"""
# Create a bag-of-words embedding for each sparse feature.
inputs = tf.feature_column.input_layer(features, embedding_cols)
# Hidden layers.
input_dim = inputs.shape[1].value
for i, output_dim in enumerate(hidden_dims):
w = tf.get_variable(
"hidden%d_w_" % i, shape=[input_dim, output_dim],
initializer=tf.truncated_normal_initializer(
stddev=1./np.sqrt(output_dim))) / 10.
outputs = tf.matmul(inputs, w)
input_dim = output_dim
inputs = outputs
return outputs
train_rated_movies, test_rated_movies = split_dataframe(rated_movies)
train_batch = make_batch(train_rated_movies, 200)
test_batch = make_batch(test_rated_movies, 100)
with tf.variable_scope("model", reuse=False):
# Train
train_user_embeddings = create_network(train_batch)
train_labels = select_random(train_batch["label"])
with tf.variable_scope("model", reuse=True):
# Test
test_user_embeddings = create_network(test_batch)
test_labels = select_random(test_batch["label"])
movie_embeddings = tf.get_variable(
"input_layer/movie_id_embedding/embedding_weights")
test_loss = softmax_loss(
test_user_embeddings, movie_embeddings, test_labels)
train_loss = softmax_loss(
train_user_embeddings, movie_embeddings, train_labels)
_, test_precision_at_10 = tf.metrics.precision_at_k(
labels=test_labels,
predictions=tf.matmul(test_user_embeddings, movie_embeddings, transpose_b=True),
k=10)
metrics = (
{"train_loss": train_loss, "test_loss": test_loss},
{"test_precision_at_10": test_precision_at_10}
)
embeddings = {"movie_id": movie_embeddings}
return CFModel(embeddings, train_loss, metrics)
###Output
_____no_output_____
###Markdown
Train the Softmax modelWe are now ready to train the softmax model. You can set the following hyperparameters:- learning rate- number of iterations. Note: you can run `softmax_model.train()` again to continue training the model from its current state.- input embedding dimensions (the `input_dims` argument)- number of hidden layers and size of each layer (the `hidden_dims` argument)Note: since our input features are string-valued (movie_id, genre, and year), we need to map them to integer ids. This is done using [`tf.feature_column.categorical_column_with_vocabulary_list`](https://www.tensorflow.org/api_docs/python/tf/feature_column/categorical_column_with_vocabulary_list), which takes a vocabulary list specifying all the values the feature can take. Then each id is mapped to an embedding vector using [`tf.feature_column.embedding_column`](https://www.tensorflow.org/api_docs/python/tf/feature_column/embedding_column).
###Code
# Create feature embedding columns
def make_embedding_col(key, embedding_dim):
categorical_col = tf.feature_column.categorical_column_with_vocabulary_list(
key=key, vocabulary_list=list(set(movies[key].values)), num_oov_buckets=0)
return tf.feature_column.embedding_column(
categorical_column=categorical_col, dimension=embedding_dim,
      # default initializer: truncated normal with stddev=1/sqrt(dimension)
combiner='mean')
with tf.Graph().as_default():
softmax_model = build_softmax_model(
rated_movies,
embedding_cols=[
make_embedding_col("movie_id", 35),
make_embedding_col("genre", 3),
make_embedding_col("year", 2),
],
hidden_dims=[35])
softmax_model.train(
learning_rate=8., num_iterations=3000, optimizer=tf.train.AdagradOptimizer)
###Output
_____no_output_____
###Markdown
Inspect the embeddingsWe can inspect the movie embeddings as we did for the previous models. Note that in this case, the movie embeddings are used at the same time as input embeddings (for the bag of words representation of the user history), and as softmax weights.
###Code
movie_neighbors(softmax_model, "Aladdin", DOT)
movie_neighbors(softmax_model, "Aladdin", COSINE)
movie_embedding_norm([reg_model, softmax_model])
tsne_movie_embeddings(softmax_model)
###Output
_____no_output_____ |
10_pipeline/airflow/02_Create_Airflow_Environment.ipynb | ###Markdown
Verify S3_BUCKET Bucket Creation
###Code
import boto3
import time
session = boto3.session.Session()
region = session.region_name
account_id = boto3.client("sts").get_caller_identity().get("Account")
s3 = boto3.Session().client(service_name="s3", region_name=region)
setup_s3_bucket_passed = False
%store -r airflow_bucket_name
%store -r s3_mwaa_private_path
%store -r s3_mwaa_dags_private_path
%store -r airflow_env_name
%store -r airflow_vpc_name
%store -r team_role_arn
%store -r airflow_sg_id
%store -r airflow_subnet_ids
!aws s3 ls $s3_mwaa_private_path
from botocore.client import ClientError
response = None
try:
response = s3.head_bucket(Bucket=airflow_bucket_name)
print(response)
setup_s3_bucket_passed = True
except ClientError as e:
print("[ERROR] Cannot find bucket {} in {} due to {}.".format(airflow_bucket_name, response, e))
# %store setup_s3_bucket_passed
###Output
_____no_output_____
###Markdown
Create Managed Apache Airflow Environment
###Code
mwaa = boto3.client("mwaa")
s3_mwaa_bucket_arn = "arn:aws:s3:::{}".format(airflow_bucket_name)
airflow_env_arn = mwaa.create_environment(
DagS3Path="dags",
ExecutionRoleArn=team_role_arn,
AirflowVersion="1.10.12",
WebserverAccessMode="PUBLIC_ONLY",
LoggingConfiguration={
"DagProcessingLogs": {"Enabled": True, "LogLevel": "ERROR"},
"SchedulerLogs": {"Enabled": True, "LogLevel": "ERROR"},
"TaskLogs": {"Enabled": True, "LogLevel": "INFO"},
"WebserverLogs": {"Enabled": True, "LogLevel": "ERROR"},
"WorkerLogs": {"Enabled": True, "LogLevel": "ERROR"},
},
MaxWorkers=3,
Name=airflow_env_name,
NetworkConfiguration={
"SecurityGroupIds": [
airflow_sg_id,
],
"SubnetIds": airflow_subnet_ids,
},
RequirementsS3ObjectVersion="latest",
RequirementsS3Path="requirements.txt",
SourceBucketArn=s3_mwaa_bucket_arn,
EnvironmentClass="mw1.small",
)
%store airflow_env_arn
###Output
_____no_output_____
###Markdown
Please be patient this can take around 15 Minutes.
###Code
def get_airflow_check():
response = mwaa.get_environment(Name=airflow_env_name)
mwaa_status = response["Environment"]["Status"]
return mwaa_status
mwaa_status = "CREATING"
print("Checking to see if MWAA Env: {} is ready.".format(airflow_env_name))
while get_airflow_check() != "AVAILABLE":
mwaa_status
time.sleep(60)
print("Still waiting for MWAA Environment...")
print("Sucess! MWAA Env: {} is ready!".format(airflow_env_name))
###Output
_____no_output_____
###Markdown
PLEASE MAKE SURE THAT THE ABOVE COMMAND RAN SUCCESSFULLY BEFORE CONTINUING
###Code
response = mwaa.create_web_login_token(
Name=airflow_env_name
)
webServerHostName = response["WebServerHostname"]
webToken = response["WebToken"]
airflowUIUrl = 'https://'+webServerHostName+'/aws_mwaa/aws-console-sso?login=true#'+webToken
print("Here is your AirflowUI Url:"\n)
airflowUIUrl
###Output
_____no_output_____
###Markdown
Release Resources
###Code
%%html
<p><b>Shutting down your kernel for this notebook to release resources.</b></p>
<button class="sm-command-button" data-commandlinker-command="kernelmenu:shutdown" style="display:none;">Shutdown Kernel</button>
<script>
try {
els = document.getElementsByClassName("sm-command-button");
els[0].click();
}
catch(err) {
// NoOp
}
</script>
%%javascript
try {
Jupyter.notebook.save_checkpoint();
Jupyter.notebook.session.delete();
}
catch(err) {
// NoOp
}
###Output
_____no_output_____ |
books/implied.ipynb | ###Markdown
implied countsAdjust counts for time lags and unconfirmed cases.
###Code
%load_ext autoreload
%autoreload 2
%autosave 0
import etl
from pandas import DataFrame
FIGSIZE = (9, 3)
###Output
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
###Markdown
realized countsLoad examples from [CoronaWatchNL] via [email protected].[CoronaWatchNL]: https://github.com/J535D165/CoronaWatchNL
###Code
real = etl.rivm().loc[:'2020-03-30']
real.plot(figsize=FIGSIZE, logy=True)
real[::5]
###Output
_____no_output_____
###Markdown
implied exposuresAssumptions:- False-positive rate P(confirmed | not exposed) = 0.- Confirmation rate P(confirmed | exposed) = `cprob`.- Confirmations occur `ctime` days after exposure.**Caution:** If testing is common, then ignoring false positives leads to the [prosecutor's fallacy].[prosecutor's fallacy]: https://en.wikipedia.org/wiki/Prosecutor's_fallacy
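As a quick worked example using the values set in the next cell (`cprob = 0.01`, `ctime = 5`): 100 confirmed cases reported on a given day imply 100 / 0.01 = 10,000 exposures five days earlier.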
###Code
cprob = 0.01
ctime = 5
exposed = (
real['confirmed'].rename('exposed')
.div(cprob).shift(-ctime, freq=real.index.freq)
)
data = real.join(exposed, how='outer').diff()
axes = data.plot(figsize=FIGSIZE, grid=True, logy=True, title='new cases')
###Output
_____no_output_____
###Markdown
implied fatality ratesAssumptions:- Unconfirmed cases are never fatal.- Deaths occur `dtime` days after exposure.**Caution:** P(deceased | exposed) may be much less than [case fatality rate].[case fatality rate]: https://en.wikipedia.org/wiki/Case_fatality_rate
###Code
dtime = ctime + 7
confirmed = real['confirmed']
deceased = real['deceased']
cfr = deceased[-1] / confirmed.iat[-(1 + dtime - ctime)]
ifr = deceased[-1] / exposed.iat[-(1 + dtime)]
print(f"{cfr.round(3)} deaths per confirmed case")
print(f"{ifr.round(3)} deaths per implied exposure")
data = DataFrame(real['deceased'].rename('realized'))
data['expected'] = cfr * real['confirmed'].shift(dtime - ctime)
axes = data.diff().plot(figsize=FIGSIZE, title='new fatalities')
###Output
0.181 deaths per confirmed case
0.004 deaths per implied exposure
|
_notebooks/2021-04-23-Matplotlib .ipynb | ###Markdown
Matplotlib || plt Matplotlib is a plotting library for Python and its numerical mathematics extension NumPy. It provides an object-oriented API for embedding plots into applications using general-purpose GUI toolkits like Tkinter, wxPython, Qt, or GTK+.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
x = np.arange(0,10)
y=np.arange(11,21)
a=np.arange(40,50)
b=np.arange(50,60)
###Output
_____no_output_____
###Markdown
Scatter plot
###Code
plt.scatter(x,y,c='g') # c= color
plt.xlabel('X axis')
plt.ylabel('Y axis')
plt.title('Graph in 2D')
plt.savefig('g1.png')
plt.show()
###Output
_____no_output_____
###Markdown
plt plot
###Code
plt.plot(x,y)
y=x*x
plt.plot(x,y)
plt.plot(x,y,'r')
plt.plot(x,y,'r--')
plt.plot(x,y,'r*-')
###Output
_____no_output_____
###Markdown
Subplots
###Code
plt.subplot(2,2,1) # 2 rows 2 cols 1 position
plt.plot(x,y,'r')
plt.subplot(2,2,2)
plt.plot(x,y,'g')
plt.subplot(2,2,3)
plt.plot(x,y,'b')
# compute x and y coordinates for points on a sine wave
np.pi
x = np.arange(0,4*np.pi,0.1)
y=np.sin(x)
plt.title("sine wave form")
plt.plot(x,y)
plt.show()
#subplot for sin and cos waves
x=np.arange(0,5*np.pi,0.1)
y_sin = np.sin(x)
y_cos = np.cos(x)
plt.subplot(2,1,1)
plt.plot(x,y_sin,'r--')
plt.title("sine graph")
plt.subplot(2,1,2)
plt.plot(x,y_cos,'g--')
plt.title("cosine graph")
plt.show()
###Output
_____no_output_____
###Markdown
Bar plot
###Code
x= [2,8,10]
y = [11,16,18]
x2 = [3,9,11]
y2 = [4,7,9]
plt.bar(x,y)
plt.bar(x2,y2,color ='g')
plt.title('Bar graph')
plt.ylabel( 'Yaxis')
plt.xlabel( 'Xaxis')
plt.show()
###Output
_____no_output_____
###Markdown
Histograms
###Code
a = np.array([1,2,3,4,5,5,6,67,7,8,8,9])  # values are grouped into bins on the x axis; the y axis shows the count (or density) per bin
plt.hist(a)
plt.title('histogram')
plt.show()
###Output
_____no_output_____
###Markdown
Box plot
###Code
# helps to find percentiles
data = [np.random.normal(0,std,100) for std in range(1,4)]  # draw 100 samples from normal distributions with mean 0 and std 1, 2, 3
# rectangular box plot
plt.boxplot(data, vert=True, patch_artist= True)
data
###Output
_____no_output_____
###Markdown
Pie chart
###Code
labels = 'python','c++', 'ruby', 'java'
sizes = [215,130,245,210]
colors = ['gold', 'yellowgreen','lightcoral', 'lightskyblue']
explode = (0.1,0,0,0) #explode 1st slice
#plot
plt.pie(sizes,explode=explode,labels=labels,colors=colors, autopct='%1.1f%%',shadow=True)
plt.axis('equal')
plt.show()
###Output
_____no_output_____ |
notebooks/cbd_for_life.ipynb | ###Markdown
Web scraper for CBD For Life stores Import packages
###Code
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from collections import namedtuple
import csv
###Output
_____no_output_____
###Markdown
Connect to a Firefox webdriver using Selenium
###Code
driver = webdriver.Firefox()
driver.get("https://cbdforlife.us/store-locator/")
###Output
_____no_output_____
###Markdown
Submit the zip code to the form; we need to check if there's a pop-up first and close it if so
###Code
submit_button = driver.find_element_by_id('storemapper-go')
try:
submit_button.click()
except:
popup = driver.find_element_by_id('popup')
popup_close = popup.find_element_by_class_name('close-modal')
popup_close.click()
submit_button.click()
###Output
_____no_output_____
###Markdown
Get a list of the stores
###Code
store_list_container = driver.find_element_by_id('storemapper-list')
store_list = store_list_container.find_elements_by_tag_name('li')
###Output
_____no_output_____
###Markdown
Create a namedtuple to store the data
###Code
store_info = namedtuple('StoreInfo', 'store_id store_name address phone')
###Output
_____no_output_____
###Markdown
Loop through all the stores and get the information for each
###Code
store_data = []
for store in store_list:
store_id = store.get_attribute('data-idx')
store_name = store.find_element_by_class_name('storemapper-title').text
try:
address = store.find_element_by_class_name('storemapper-address').text
except:
address = "No address found"
try:
phone = store.find_element_by_class_name('storemapper-phone').text
except:
phone = "No phone number found"
store = store_info(store_id, store_name, address, phone)
store_data.append(store)
###Output
_____no_output_____
###Markdown
Write data to csv file
###Code
with open('../data/cbd_for_life.csv', 'w') as f:
w = csv.writer(f)
w.writerow(('store_id', 'store_name', 'address', 'phone_number'))
w.writerows([store.store_id, store.store_name, store.address, store.phone] for store in store_data)
###Output
_____no_output_____ |
DigitalBiomarkers-HumanActivityRecognition/10_code/50_deep_learning/53_tensorflow_models/53_tensorflow_Duke_Data/.ipynb_checkpoints/20_ANN_window_feature_engineering_balanced-checkpoint.ipynb | ###Markdown
Window Feature Classification Model: ANN with Feature Engineering This file contains an artificial neural network classification model used to evaluate whether features computed over windows of time (20 seconds with 10-second overlap) generate a better model than our simple timepoint classifier. Leave-One-Person-Out (LOPO) Cross-Validation is used to validate the model. __INPUT: .csv files containing the rolled sensor data with feature engineering (engineered_features.csv)__ __OUTPUT: Neural Network Multi-Classification Window Feature Model (F1 Score = 0.871)__ Imports
###Code
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import plot_confusion_matrix
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score
import seaborn as sns
import tensorflow as tf
###Output
_____no_output_____
###Markdown
Read in Data The loaded dataset contains windows of data that are 20 seconds long with a 10 second overlap. These are stored as arrays in the dataframe.
###Code
pd.set_option('display.max_columns', None)
df = pd.read_csv('/Users/N1/Data7/Data-2020/10_code/40_usable_data_for_models/41_Duke_Data/engineered_features.csv')
###Output
_____no_output_____
###Markdown
We add a window number that changes every time there is a new activity present, as we wish to use this as a feature.
###Code
# Number each row within its run of consecutive identical activities: the cumulative sum over
# activity changes defines the groups, and cumcount (starting at 1) numbers the rows in each run.
df = df.assign(count=df.groupby(df.Activity.ne(df.Activity.shift()).cumsum()).cumcount().add(1))
df.head(5)
###Output
_____no_output_____
###Markdown
Label Encode Activity and Subject_ID We label-encode the target variable (Activity) because it must later be one-hot encoded for the model. The label associated with each class is printed below.
###Code
from sklearn.preprocessing import LabelEncoder
le1 = LabelEncoder()
df['Activity'] = le1.fit_transform(df['Activity'])
activity_name_mapping = dict(zip(le1.classes_, le1.transform(le1.classes_)))
print(activity_name_mapping)
le = LabelEncoder()
df['Subject_ID'] = le.fit_transform(df['Subject_ID'])
###Output
_____no_output_____
###Markdown
Create Test Train split
###Code
np.random.seed(29)
rands = np.random.choice(df.Subject_ID.unique(),3, replace=False)
print(f' These will be our Subjects in our test set: {rands}')
###Output
These will be our Subjects in our test set: [39 17 45]
###Markdown
Split Subjects into Test and Train Sets (n=52, 3)
###Code
test = df[df['Subject_ID'].isin(rands)]
train = df[-df['Subject_ID'].isin(rands)]
###Output
_____no_output_____
###Markdown
Feature Selection Choose features to be used in model Pick one of the three following code cells to choose what features are used in the model. Do not run them all. To uncomment or comment multiple selected lines, press control + /. All Features
###Code
# train = train[['ACC1_mean', 'ACC2_mean', 'ACC3_mean', 'TEMP_mean', 'EDA_mean', 'BVP_mean', 'HR_mean', 'Magnitude_mean', 'ACC1_std', 'ACC2_std',
# 'ACC3_std', 'TEMP_std', 'EDA_std', 'BVP_std', 'HR_std', 'Magnitude_std', 'ACC1_min', 'ACC2_min', 'ACC3_min', 'TEMP_min', 'EDA_min', 'BVP_min', 'HR_min', 'Magnitude_min',
# 'ACC1_max', 'ACC2_max', 'ACC3_max', 'TEMP_max', 'EDA_max', 'BVP_max', 'HR_max', 'Magnitude_max', 'Subject_ID', 'count', 'Activity']]
# test = test [['ACC1_mean', 'ACC2_mean', 'ACC3_mean', 'TEMP_mean', 'EDA_mean', 'BVP_mean', 'HR_mean', 'Magnitude_mean', 'ACC1_std', 'ACC2_std',
# 'ACC3_std', 'TEMP_std', 'EDA_std', 'BVP_std', 'HR_std', 'Magnitude_std', 'ACC1_min', 'ACC2_min', 'ACC3_min', 'TEMP_min', 'EDA_min', 'BVP_min', 'HR_min', 'Magnitude_min',
# 'ACC1_max', 'ACC2_max', 'ACC3_max', 'TEMP_max', 'EDA_max', 'BVP_max', 'HR_max', 'Magnitude_max', 'Subject_ID', 'count', 'Activity']]
###Output
_____no_output_____
###Markdown
Mechanical Features
###Code
# train = train[['ACC1_mean', 'ACC2_mean', 'ACC3_mean', 'Magnitude_mean', 'ACC1_std', 'ACC2_std', 'ACC3_std', 'Magnitude_std',
# 'ACC1_min', 'ACC2_min', 'ACC3_min', 'Magnitude_min',
# 'ACC1_max', 'ACC2_max', 'ACC3_max', 'Magnitude_max', 'Subject_ID', 'count', 'Activity']]
# test = test[['ACC1_mean', 'ACC2_mean', 'ACC3_mean', 'Magnitude_mean', 'ACC1_std', 'ACC2_std', 'ACC3_std', 'Magnitude_std',
# 'ACC1_min', 'ACC2_min', 'ACC3_min', 'Magnitude_min',
# 'ACC1_max', 'ACC2_max', 'ACC3_max', 'Magnitude_max', 'Subject_ID', 'count', 'Activity']]
###Output
_____no_output_____
###Markdown
Physiological Features
###Code
train = train[['TEMP_mean', 'EDA_mean', 'BVP_mean', 'HR_mean', 'TEMP_std', 'EDA_std', 'BVP_std', 'HR_std', 'TEMP_min', 'EDA_min', 'BVP_min', 'HR_min',
'TEMP_max', 'EDA_max', 'BVP_max', 'HR_max', 'Subject_ID', 'count', 'Activity']]
test = test[['TEMP_mean', 'EDA_mean', 'BVP_mean', 'HR_mean', 'TEMP_std', 'EDA_std', 'BVP_std', 'HR_std', 'TEMP_min', 'EDA_min', 'BVP_min', 'HR_min',
'TEMP_max', 'EDA_max', 'BVP_max', 'HR_max', 'Subject_ID', 'count', 'Activity']]
###Output
_____no_output_____
###Markdown
Balancing Classes In the following code cells, we randomly sample data from our majority classes to balance our dataset.
###Code
#{'Activity': 0, 'Baseline': 1, 'DB': 2, 'Type': 3}
train['Activity'].value_counts()
zero = train[train['Activity'] == 0]
one = train[train['Activity'] == 1]
two = train[train['Activity'] == 2]
three =train[train['Activity'] == 3]
zero = zero.sample(505)
one = one.sample(505)
train = pd.concat([zero, one, two, three])
train['Activity'].value_counts()
###Output
_____no_output_____
###Markdown
This train_SID is made so we can use the Subject_ID values to perform LOPO (leave one person out) later on.
###Code
train_SID = train['Subject_ID'].values
###Output
_____no_output_____
###Markdown
Apply one-hot encoding to Subject ID and window count Subject_ID and window count must be one-hot encoded to be used as features in our model. Test and train dataframes must be concatenated before we one-hot encode, so that we do not get different encodings for each data set.
###Code
train['train'] =1
test['train'] = 0
combined = pd.concat([train, test])
combined = pd.concat([combined, pd.get_dummies(combined['Subject_ID'], prefix = 'SID')], axis =1).drop('Subject_ID', axis =1)
combined = pd.concat([combined, pd.get_dummies(combined['count'], prefix = 'count')], axis =1).drop('count', axis = 1)
train = combined[combined['train'] == 1]
test = combined[combined['train'] == 0]
train.drop(["train"], axis = 1, inplace = True)
test.drop(["train"], axis = 1, inplace = True)
print(train.shape, test.shape)
###Output
(2020, 130) (310, 130)
###Markdown
We remove activity from our train and test datasets as this is the y variable (target variable) and we are only interested in keeping the features.
###Code
train_f = train.drop("Activity", axis =1)
test_f = test.drop("Activity", axis =1)
###Output
_____no_output_____
###Markdown
Define X (features) and y (targets)
###Code
X_train = train_f
y_train = train.Activity
X_test = test_f
y_test = test.Activity
###Output
_____no_output_____
###Markdown
Standardize Data Scaling is used to put values on a common scale without distorting differences in the range of values for each sensor. We do this because the different sensors produce values in dissimilar ranges, and if we did not scale the data, gradients may oscillate back and forth and take a long time before finding the local minimum. It may not be necessary for this data, but to be safe, we standardized the features. The standard score of a sample x is calculated as: $$z = \frac{x-u}{s}$$ where u is the mean and s is the standard deviation of the training samples (computed per feature). The scaling is fit on the training set and applied to both the training and test set.
###Code
sc = StandardScaler()
X_train.iloc[:,:16] = sc.fit_transform(X_train.iloc[:,:16])
X_test.iloc[:,:16] = sc.transform(X_test.iloc[:,:16])
X_train
X_train = X_train.values
X_test = X_test.values
from keras.utils import np_utils
y_train_dummy = np_utils.to_categorical(y_train)
y_test_dummy = np_utils.to_categorical(y_test)
###Output
_____no_output_____
###Markdown
Neural Network- 5 hidden **fully connected** layers with 256 nodes each- The **Dropout** layer randomly sets input units to 0 with a given rate at each step during training, which helps prevent overfitting.- **Softmax** activation function - used in the final fully connected layer of the model to generate a probability for each class. We decided to use Adam as our optimizer because it is computationally efficient and adapts the learning rate on a per-parameter basis, using moving estimates of the per-parameter gradient and the per-parameter squared gradient.
###Code
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
###Output
_____no_output_____
###Markdown
LOOCV __Leave One Out CV:__ Each observation is used as a validation set while the remaining n-1 observations form the training set. Fit the model and predict using the single-observation validation set. Repeat this n times, once with each observation as the validation set. The test-error rate is the average of all n errors. (In this notebook we apply the closely related Leave-One-Group-Out scheme, leaving out one subject per fold, i.e. LOPO.) __Advantages:__ takes care of both drawbacks of the validation-set method. 1. No randomness in choosing which observations go to the training vs. validation set, unlike the validation-set method, as each observation is used for both training and validation. So there is less variability than the validation-set method, no matter how many times you run it. 2. Less bias than the validation-set method, as the training set has size n-1. Because of this reduced bias, the test error is over-estimated less than with the validation-set method. __Disadvantages:__ 1. Even though each iteration's test error is unbiased, it has high variability because only a one-observation validation set is used for prediction. 2. Computationally expensive (time and power), especially if the dataset is big with large n, as it requires fitting the model n times. Also, some statistical models have computationally intensive fitting, so with large datasets and these models LOOCV might not be a good choice.
###Code
from sklearn.model_selection import LeaveOneGroupOut
# Lists to store metrics
acc_per_fold = []
loss_per_fold = []
f1_per_fold = []
# Define the K-fold Cross Validator
groups = train_SID
inputs = X_train
targets = y_train_dummy
logo = LeaveOneGroupOut()
logo.get_n_splits(inputs, targets, groups)
cv = logo.split(inputs, targets, groups)
# LOGO
fold_no = 1
for train, test in cv:
#Define the model architecture
model = Sequential()
model.add(Dense(256, activation='relu'))
model.add(Dense(256, activation='relu'))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(256, activation='relu'))
model.add(Dense(256, activation='relu'))
model.add(Dense(4, activation='softmax')) #4 outputs are possible
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Generate a print
print('------------------------------------------------------------------------')
print(f'Training for fold {fold_no} ...')
# Fit data to model
history = model.fit(inputs[train], targets[train],
batch_size=32,
epochs=10,
verbose=1)
# Generate generalization metrics
scores = model.evaluate(inputs[test], targets[test], verbose=0)
y_pred = np.argmax(model.predict(inputs[test]), axis=-1)
f1 = (f1_score(np.argmax(targets[test], axis=1), (y_pred), average = 'weighted'))
print(f'Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; {model.metrics_names[1]} of {scores[1]*100}%, F1 of {f1}')
f1_per_fold.append(f1)
acc_per_fold.append(scores[1] * 100)
loss_per_fold.append(scores[0])
# Increase fold number
fold_no = fold_no + 1
# == Provide average scores ==
print('------------------------------------------------------------------------')
print('Score per fold')
for i in range(0, len(acc_per_fold)):
print('------------------------------------------------------------------------')
print(f'> Fold {i+1} - Loss: {loss_per_fold[i]} - Accuracy: {acc_per_fold[i]}% - F1:{f1_per_fold[i]}%')
print('------------------------------------------------------------------------')
print('Average scores for all folds:')
print(f'> Accuracy: {np.mean(acc_per_fold)} (+- {np.std(acc_per_fold)})')
print(f'> F1: {np.mean(f1_per_fold)} (+- {np.std(f1_per_fold)})')
print(f'> Loss: {np.mean(loss_per_fold)}')
print('------------------------------------------------------------------------')
###Output
------------------------------------------------------------------------
Training for fold 1 ...
Epoch 1/10
62/62 [==============================] - 0s 2ms/step - loss: 1.2943 - accuracy: 0.3921
Epoch 2/10
62/62 [==============================] - 0s 2ms/step - loss: 1.0806 - accuracy: 0.5488
Epoch 3/10
62/62 [==============================] - 0s 2ms/step - loss: 0.7433 - accuracy: 0.6983
Epoch 4/10
62/62 [==============================] - 0s 3ms/step - loss: 0.6003 - accuracy: 0.7736
Epoch 5/10
62/62 [==============================] - 0s 3ms/step - loss: 0.5082 - accuracy: 0.8140
Epoch 6/10
62/62 [==============================] - 0s 2ms/step - loss: 0.4101 - accuracy: 0.8474
Epoch 7/10
62/62 [==============================] - 0s 2ms/step - loss: 0.3453 - accuracy: 0.8762
Epoch 8/10
62/62 [==============================] - 0s 3ms/step - loss: 0.3055 - accuracy: 0.8853
Epoch 9/10
62/62 [==============================] - 0s 2ms/step - loss: 0.2709 - accuracy: 0.8994
Epoch 10/10
62/62 [==============================] - 0s 3ms/step - loss: 0.2085 - accuracy: 0.9277
Score for fold 1: loss of 0.4869212210178375; accuracy of 85.36585569381714%, F1 of 0.8534713991641194
------------------------------------------------------------------------
Training for fold 2 ...
Epoch 1/10
62/62 [==============================] - 0s 2ms/step - loss: 1.2803 - accuracy: 0.4075
Epoch 2/10
62/62 [==============================] - 0s 2ms/step - loss: 0.9858 - accuracy: 0.5971
Epoch 3/10
62/62 [==============================] - 0s 2ms/step - loss: 0.6914 - accuracy: 0.7290
Epoch 4/10
62/62 [==============================] - 0s 2ms/step - loss: 0.5834 - accuracy: 0.7765
Epoch 5/10
62/62 [==============================] - 0s 2ms/step - loss: 0.4761 - accuracy: 0.8170
Epoch 6/10
62/62 [==============================] - 0s 2ms/step - loss: 0.3916 - accuracy: 0.8519
Epoch 7/10
62/62 [==============================] - 0s 3ms/step - loss: 0.3082 - accuracy: 0.8868
Epoch 8/10
62/62 [==============================] - 0s 3ms/step - loss: 0.2462 - accuracy: 0.9166
Epoch 9/10
62/62 [==============================] - 0s 2ms/step - loss: 0.2660 - accuracy: 0.9090
Epoch 10/10
62/62 [==============================] - 0s 2ms/step - loss: 0.1853 - accuracy: 0.9312
Score for fold 2: loss of 2.1405515670776367; accuracy of 66.66666865348816%, F1 of 0.62548945307566
------------------------------------------------------------------------
Training for fold 3 ...
Epoch 1/10
62/62 [==============================] - 0s 2ms/step - loss: 1.2845 - accuracy: 0.4016
Epoch 2/10
62/62 [==============================] - 0s 2ms/step - loss: 0.9895 - accuracy: 0.5928
Epoch 3/10
62/62 [==============================] - 0s 2ms/step - loss: 0.7105 - accuracy: 0.7245
Epoch 4/10
62/62 [==============================] - 0s 2ms/step - loss: 0.5786 - accuracy: 0.7770
Epoch 5/10
62/62 [==============================] - 0s 2ms/step - loss: 0.4698 - accuracy: 0.8280
Epoch 6/10
62/62 [==============================] - 0s 2ms/step - loss: 0.3971 - accuracy: 0.8587
Epoch 7/10
62/62 [==============================] - 0s 2ms/step - loss: 0.3694 - accuracy: 0.8673
Epoch 8/10
62/62 [==============================] - 0s 2ms/step - loss: 0.2914 - accuracy: 0.8961
Epoch 9/10
62/62 [==============================] - 0s 2ms/step - loss: 0.2447 - accuracy: 0.9117
Epoch 10/10
62/62 [==============================] - 0s 2ms/step - loss: 0.2243 - accuracy: 0.9228
Score for fold 3: loss of 1.298040747642517; accuracy of 63.15789222717285%, F1 of 0.640759507464313
------------------------------------------------------------------------
Training for fold 4 ...
Epoch 1/10
62/62 [==============================] - 0s 2ms/step - loss: 1.2915 - accuracy: 0.3963
Epoch 2/10
62/62 [==============================] - 0s 3ms/step - loss: 1.0107 - accuracy: 0.5870
Epoch 3/10
62/62 [==============================] - 0s 2ms/step - loss: 0.7304 - accuracy: 0.7298
Epoch 4/10
62/62 [==============================] - 0s 3ms/step - loss: 0.5906 - accuracy: 0.7788
Epoch 5/10
62/62 [==============================] - 0s 2ms/step - loss: 0.4891 - accuracy: 0.8112
Epoch 6/10
62/62 [==============================] - 0s 2ms/step - loss: 0.3915 - accuracy: 0.8512
Epoch 7/10
62/62 [==============================] - 0s 3ms/step - loss: 0.3385 - accuracy: 0.8725
Epoch 8/10
62/62 [==============================] - 0s 3ms/step - loss: 0.2940 - accuracy: 0.9013
Epoch 9/10
62/62 [==============================] - 0s 2ms/step - loss: 0.2400 - accuracy: 0.9109
Epoch 10/10
62/62 [==============================] - 0s 2ms/step - loss: 0.2347 - accuracy: 0.9185
Score for fold 4: loss of 1.9513981342315674; accuracy of 56.81818127632141%, F1 of 0.5692974063385632
------------------------------------------------------------------------
Training for fold 5 ...
Epoch 1/10
62/62 [==============================] - 0s 2ms/step - loss: 1.2842 - accuracy: 0.3982
Epoch 2/10
62/62 [==============================] - 0s 2ms/step - loss: 1.0430 - accuracy: 0.5643
Epoch 3/10
62/62 [==============================] - 0s 3ms/step - loss: 0.7009 - accuracy: 0.7204
Epoch 4/10
62/62 [==============================] - 0s 2ms/step - loss: 0.5586 - accuracy: 0.7888
Epoch 5/10
62/62 [==============================] - 0s 3ms/step - loss: 0.4598 - accuracy: 0.8247
Epoch 6/10
62/62 [==============================] - 0s 2ms/step - loss: 0.4015 - accuracy: 0.8465
Epoch 7/10
62/62 [==============================] - 0s 2ms/step - loss: 0.3041 - accuracy: 0.8880
Epoch 8/10
62/62 [==============================] - 0s 2ms/step - loss: 0.2761 - accuracy: 0.9043
Epoch 9/10
62/62 [==============================] - 0s 2ms/step - loss: 0.2274 - accuracy: 0.9245
Epoch 10/10
62/62 [==============================] - 0s 3ms/step - loss: 0.2438 - accuracy: 0.9179
Score for fold 5: loss of 2.1546151638031006; accuracy of 52.173912525177%, F1 of 0.5015007875419775
------------------------------------------------------------------------
Training for fold 6 ...
Epoch 1/10
62/62 [==============================] - 0s 2ms/step - loss: 1.2819 - accuracy: 0.3965
Epoch 2/10
62/62 [==============================] - 0s 2ms/step - loss: 1.0358 - accuracy: 0.5742
Epoch 3/10
62/62 [==============================] - 0s 3ms/step - loss: 0.7073 - accuracy: 0.7227
Epoch 4/10
62/62 [==============================] - 0s 3ms/step - loss: 0.5945 - accuracy: 0.7707
Epoch 5/10
62/62 [==============================] - 0s 2ms/step - loss: 0.4975 - accuracy: 0.8141
Epoch 6/10
62/62 [==============================] - 0s 2ms/step - loss: 0.4049 - accuracy: 0.8449
Epoch 7/10
62/62 [==============================] - 0s 3ms/step - loss: 0.3226 - accuracy: 0.8828
Epoch 8/10
62/62 [==============================] - 0s 2ms/step - loss: 0.2843 - accuracy: 0.9000
Epoch 9/10
62/62 [==============================] - 0s 3ms/step - loss: 0.2398 - accuracy: 0.9152
Epoch 10/10
62/62 [==============================] - 0s 2ms/step - loss: 0.1969 - accuracy: 0.9318
Score for fold 6: loss of 1.3011457920074463; accuracy of 60.00000238418579%, F1 of 0.5218390804597701
------------------------------------------------------------------------
Training for fold 7 ...
Epoch 1/10
62/62 [==============================] - 0s 3ms/step - loss: 1.2874 - accuracy: 0.4134
Epoch 2/10
62/62 [==============================] - 0s 3ms/step - loss: 1.0345 - accuracy: 0.5634
Epoch 3/10
62/62 [==============================] - 0s 3ms/step - loss: 0.7292 - accuracy: 0.7234
Epoch 4/10
62/62 [==============================] - 0s 2ms/step - loss: 0.5937 - accuracy: 0.7739
Epoch 5/10
62/62 [==============================] - 0s 2ms/step - loss: 0.4759 - accuracy: 0.8294
Epoch 6/10
62/62 [==============================] - 0s 3ms/step - loss: 0.4015 - accuracy: 0.8546
Epoch 7/10
62/62 [==============================] - 0s 3ms/step - loss: 0.3379 - accuracy: 0.8758
Epoch 8/10
62/62 [==============================] - 0s 3ms/step - loss: 0.3177 - accuracy: 0.8935
Epoch 9/10
62/62 [==============================] - 0s 2ms/step - loss: 0.2565 - accuracy: 0.9086
Epoch 10/10
62/62 [==============================] - 0s 2ms/step - loss: 0.2186 - accuracy: 0.9202
Score for fold 7: loss of 1.9404150247573853; accuracy of 53.84615659713745%, F1 of 0.5082417582417582
------------------------------------------------------------------------
Training for fold 8 ...
Epoch 1/10
###Markdown
Please edit the name of the model below. This will be used to save the model and figures associated with the model.
###Code
model_name = '20_TF_FE_balanced_Phys_Only'
!mkdir -p saved_model
model.save(f'saved_model/{model_name}')
###Output
INFO:tensorflow:Assets written to: saved_model/20_TF_FE_balanced_Phys_Only/assets
###Markdown
Prediction We obtain the predicted class for each test set sample by using the argmax function on the predicted probabilities that are output from our model. Argmax returns the class with the highest probability.
###Code
model = tf.keras.models.load_model(f'saved_model/{model_name}')
y_pred = np.argmax(model.predict(X_test), axis=-1)
results = model.evaluate(X_test, y_test_dummy, batch_size=32)
print("Test loss, Test acc:", results)
###Output
10/10 [==============================] - 0s 1ms/step - loss: 2.6477 - accuracy: 0.4000
Test loss, Test acc: [2.6476845741271973, 0.4000000059604645]
###Markdown
A **confusion matrix** is generated to observe where the model classifies well and to identify which classes the model does not classify well.
###Code
cm = confusion_matrix(y_test,y_pred)
cm
###Output
_____no_output_____
###Markdown
We normalize the confusion matrix to better understand the proportions of classes classified correctly and incorrectly for this model.
###Code
cm= cm.astype('float')/cm.sum(axis=1)[:,np.newaxis]
cm
ax = plt.subplot()
sns.heatmap(cm, annot = True, fmt = '.2f',cmap = 'Blues', xticklabels = le1.classes_, yticklabels = le1.classes_)
ax.set_xlabel("Predicted labels")
ax.set_ylabel('Actual labels')
plt.title('Feature Engineered STEP balanced - Confusion Matrix')
plt.savefig(f'20_figures/{model_name}_CF.png')
###Output
_____no_output_____
###Markdown
The **accuracy** score represents the proportion of correct classifications over all classifications. The **F1 score** is a composite of two other metrics: Precision: the proportion of correct 'positive' predictions over all 'positive' predictions. Recall (sensitivity): the proportion of correct 'positive' predictions over all samples that are actually 'positive'. The F1 score gives insight into whether all classes are predicted correctly at the same rate. A low F1 score combined with a high accuracy can indicate that only a majority class is being predicted.
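For reference (added note): with precision $P$ and recall $R$, the per-class F1 score is the harmonic mean $$F_1 = 2\cdot\frac{P \cdot R}{P + R},$$ and because the code below calls `f1_score(..., average = 'weighted')`, the reported value is the average of the per-class F1 scores weighted by class support.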
###Code
a_s = accuracy_score(y_test, y_pred)
f1_s = f1_score(y_test, y_pred, average = 'weighted')
print(f'Accuracy Score: {a_s:.3f} \nF1 Score: {f1_s:.3f}')
###Output
Accuracy Score: 0.400
F1 Score: 0.377
|
python/ch6/6.7.1-model-parameter-estimation.ipynb | ###Markdown
Let us suppose that we know that the height of an adult resident in Statsville lies between 160 cm and 170 cm. We want to predict if this resident is female. For this purpose, we have collected a set of height samples from adult female residents in Statsville. This becomes our training data. From physical considerations, we can assume that the distribution of heights is Gaussian. Our goal is to estimate the parameters ($\mu$, $\sigma$) of this Gaussian. In this notebook, we will study two ways of doing this: (1) Maximum Likelihood Estimation and (2) Maximum A Posteriori Estimation. Let us first create the dataset $X$ by sampling 10000 points from a Gaussian distribution with $\mu$=152 and $\sigma$=8. In real-life scenarios, we do not know the mean and standard deviation of the true distribution. But for the sake of this example, let's assume that the mean height is 152 cm and the standard deviation is 8 cm.
###Code
torch.random.manual_seed(42)
num_samples = 10000
mu = 152
sigma = 8
X = torch.normal(mu, sigma, size=(num_samples, 1))
print('Dataset shape: {}'.format(X.shape))
###Output
Dataset shape: torch.Size([10000, 1])
###Markdown
Maximum Likelihood Estimate (MLE). In MLE, we try to find the parameters that "best explain" our data. In other words, we try to find the parameters that maximize the joint likelihood of our training data instances. Let's say our model is parameterised by $\theta$. The likelihood function $p(X|\theta)$ shows how likely the sample distribution $X$ is for different values of $\theta$. With MLE, our goal is to find the parameters $\theta$ that maximise $p(X|\theta)$. We can assume that our model parameterises a Gaussian distribution $N(\mu, \sigma)$. The likelihood function can be written as $$ p(X|\theta) = N(X|\mu,\sigma) = \prod_{i=1}^{N}N(x_i|\mu,\sigma) = \Bigl(\frac{1}{2\pi\sigma^2}\Bigr)^{\frac{N}{2}}\exp\Bigl(\frac{-1}{2\sigma^2}\sum_{i=1}^N(x_i - \mu)^2\Bigr) $$ Maximising the likelihood function yields $$\mu_{MLE} = \frac{1}{N}\sum_{i=1}^N x_i$$ $$\sigma^2_{MLE} = \frac{1}{N}\sum_{i=1}^N(x_i - \mu_{MLE})^2$$ In practice, we maximise the logarithm of the likelihood because it makes for much easier calculations when dealing with exponential functions. Refer to section 6.8 in the book for a detailed derivation. Thus, by computing the sample mean and the sample standard deviation, we can find the parameters of the best-fit Gaussian for the dataset. Once we estimate the parameters, we can find the probability that a sample lies in a range using the following formula: $$ p(a < X \leq b) = \int_{a}^b p(X)\, dX $$
###Code
# Let us compute the mean and standard deviation of the sampled points.
sample_mean = X.mean()
sample_std = X.std()
print('Sample mean: {}'.format(sample_mean))
print('Sample standard deviation: {}'.format(sample_std))
# As expected, the sample mean and sample standard deviation are close to the corresponding values
# of the Normal distribution that the points were sampled from
gaussian_mle = Normal(sample_mean, sample_std)
# We want to find out the probability that a height between 160 and 170 belongs to an adult female resident
a, b = torch.Tensor([160]), torch.Tensor([170])
prob = gaussian_mle.cdf(b) - gaussian_mle.cdf(a)
print('Prob: {}'.format(prob))
a, b = torch.Tensor([160]), torch.Tensor([170])
prob = gaussian_mle.cdf(b) - gaussian_mle.cdf(a)
###Output
_____no_output_____
###Markdown
Maximum Likelihood Estimate using Gradient DescentAbove, we were able to estimate the parameters using the closed form solution. Now, let us try to arrive at these parameters iteratively using gradient descent. In real-life scenarios, we don't use gradient descent because the closed form solution is available. But we discuss the gradient descent based approach to highlight some of the challenges.Our objective is to find the parameters $\theta$ that maximise the likelihood function $p(X|\theta)$. We choose to maximise the log of the likelihood function since it is more mathematically convenient. This can alternatively viewed as minimising the negative log-likelihood function.$$ -\log p(X|\theta) = \frac{N}{2}\log(2\pi\sigma^2) + \frac{1}{2\sigma^2}\sum_{i=1}^n(x_i - \mu)^2$$The optimisation process is as follows: 1. Initialise the model parameters, $\mu$ and $\sigma$ with random values 2. Compute the loss value (negative log-likelihood) 3. Find the gradients of the loss w.r.t the model parameters 4. Update the model parameters in the opposite direction of the gradient values 5. Repeat steps 1-4 until loss diminishes to a small value
###Code
import torch
from torch.autograd import Variable
dtype = torch.FloatTensor
# Negative log likelihood function defined above
def neg_log_likelihood(X, mu, sigma):
N = X.shape[0]
X_minus_mu = torch.sub(X, mu)
return torch.mul(0.5 * N, torch.log(2 * np.pi * torch.pow(sigma, 2))) + \
torch.div(torch.matmul(X_minus_mu.T, X_minus_mu), 2 * torch.pow(sigma, 2))
# Gradient descent to estimate the parameters
def optimise(X, mu, sigma, loss_fn, num_iters=100, lr = 0.001):
X = torch.Tensor(X) # Convert the data to a torch tensor
iters, losses, mus, sigmas = [], [], [], []
for i in range(num_iters):
loss = loss_fn(X, mu, sigma)
if i % (num_iters / 10) == 0:
print('iter: {}, loss: {}, mu: {}, sigma: {}'.format(i, loss[0][0] / num_samples,
mu.data[0], sigma.data[0]))
iters.append(i)
losses.append(loss[0][0] / num_samples)
mus.append(float(mu.data))
sigmas.append(float(sigma.data))
# We don't explicitly compute the gradients ourselves. We rely on torch to automatically
# compute the gradients. The gradients are stored in <param>.grad.
loss.backward()
# We scale the gradients by the learning rate before update
mu.data -= lr * mu.grad
sigma.data -= lr * sigma.grad
# We zero out the gradients before every update. Otherwise gradients from previous iterations get accumulated
mu.grad.data.zero_()
sigma.grad.data.zero_()
return iters, losses, mus, sigmas
def plot_mle_fit(iters, mus, sigmas, expected_mu=152, expected_sigma=8,
mu_text_y=160, sigma_text_y=100):
plt.figure(figsize=(4, 4))
ax1 = plt.subplot(2, 1, 1)
ax1.plot(iters, mus)
ax1.plot([iters[0], iters[-1]], [expected_mu, expected_mu], color='green')
ax1.text(iters[int(len(iters) / 2)], mu_text_y, r'Expected $\mu={}$'.format(expected_mu))
ax1.set_xlabel('Iteration')
ax1.set_ylabel('Mean (cm)')
ax1.grid(True)
ax2 = plt.subplot(2, 1, 2)
ax2.plot(iters, sigmas)
ax2.plot([iters[0], iters[-1]], [expected_sigma, expected_sigma], color='green')
ax2.text(iters[int(len(iters) / 2)], sigma_text_y, r'Expected $\sigma={}$'.format(expected_sigma))
ax2.set_xlabel('Iteration')
ax2.set_ylabel('Sigma (cm)')
ax2.grid(True)
plt.show()
# We define our model params mu and sigma as torch Variables.
# Note that requires_grad has been set to true - this tells PyTorch that gradients are
# required to be computed for these variables. We randomly initialise both mu and sigma.
mu = Variable(torch.Tensor([1]).type(dtype), requires_grad=True)
sigma = Variable(torch.Tensor([1]).type(dtype), requires_grad=True)
iters, losses, mus, sigmas = optimise(X, mu, sigma, neg_log_likelihood, num_iters=1000, lr=0.001)
plot_mle_fit(iters, mus, sigmas, mu_text_y=200, sigma_text_y=10000)
###Output
_____no_output_____
###Markdown
What just happened? The estimated mean and standard deviation are nowhere close to our expected values of 152 and 8, but are instead very large numbers.Let's try to tweak things a bit. We had initialised mu and sigma with a value of 1. Now, let us initialise them somewhere in the neighborhood of our expected values. Let's say mu = 100 and sigma is 10.
###Code
# Let us initialise mu to 100 and sigma to 10
mu = Variable(torch.Tensor([100]).type(dtype), requires_grad=True)
sigma = Variable(torch.Tensor([10]).type(dtype), requires_grad=True)
iters, losses, mus, sigmas = optimise(X, mu, sigma, neg_log_likelihood, num_iters=500)
###Output
iter: 0, loss: 17.09115219116211, mu: 100.0, sigma: 10.0
iter: 50, loss: 4.972278594970703, mu: 118.44664001464844, sigma: 38.49307632446289
iter: 100, loss: 4.69744873046875, mu: 129.0364227294922, sigma: 33.65632247924805
iter: 150, loss: 4.295865058898926, mu: 139.37002563476562, sigma: 24.12705421447754
iter: 200, loss: 3.511776924133301, mu: 151.1002960205078, sigma: 8.47098159790039
iter: 250, loss: 3.502779722213745, mu: 152.05128479003906, sigma: 8.035272598266602
iter: 300, loss: 3.502779722213745, mu: 152.0514678955078, sigma: 8.035273551940918
iter: 350, loss: 3.502779722213745, mu: 152.0514678955078, sigma: 8.035273551940918
iter: 400, loss: 3.502779722213745, mu: 152.0514678955078, sigma: 8.035273551940918
iter: 450, loss: 3.502779722213745, mu: 152.0514678955078, sigma: 8.035273551940918
###Markdown
Our model has converged! The loss has decreased and the estimated $\mu$ and $\sigma$ are close to 152 and 8 respectively. The initial values of $\mu$ and $\sigma$ played a crucial role in helping the model converge. We were lucky this time because we knew what to expect for $\mu$ and $\sigma$. However, this is typically not the case in real-world scenarios. Is there a better way to solve this problem? This is where Maximum A Posteriori (MAP) estimation comes into play.
###Code
plot_mle_fit(iters, mus, sigmas, mu_text_y=145, sigma_text_y=10)
###Output
_____no_output_____
###Markdown
Maximum A Posteriori (MAP) EstimationInstead of maximizing $p(X|\theta)$, we can directly maximize $p(\theta|X)$, i.e. the probability of the parameters given the data instances.Using Bayes' theorem, $$p(\theta|X) = \frac{p(X|\theta)p(\theta)}{p(X)}$$Maximizing $p(\theta|X)$ is equivalent to maximizing the numerator of the above expression, because the denominator is independent of $\theta$. $p(X|\theta)$ is what we maximized in MLE. We need to estimate $p(\theta)$, which is also called the prior probability. A popular approach is to say that we want the parameters to be as small as possible. Hence, we assume that $p(\theta)$ is proportional to $e^{-\theta^2}$. Refer to the chapter in the book for details. Maximizing $p(\theta|X)$ is equivalent to minimizing $-\log p(\theta|X)$, which (up to additive constants) is$$ -\log p(\theta|X) = -\log p(X|\theta) - \log p(\theta) = \frac{N}{2}\log(2\pi\sigma^2) + \frac{1}{2\sigma^2}\sum_{i=1}^n(x_i - \mu)^2 + \mu^2 + \sigma^2 $$This is the same function as the negative log-likelihood discussed in MLE with two additional terms, $\mu^2$ and $\sigma^2$. They act as regularizers and prevent the parameters from exploding by penalizing large values of $\mu$ and $\sigma$.
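To see where the extra terms come from (a one-line sketch, written with a general prior strength $k$, which corresponds to the `k` factor used in the code below): if $p(\theta) \propto e^{-k\theta^2}$ then $-\log p(\theta) = k\theta^2 + \text{const}$, and applying this to both parameters $\theta = (\mu, \sigma)$ adds $k(\mu^2 + \sigma^2)$ to the negative log-likelihood, which is exactly the `loss_reg` term in the next cell.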
###Code
# Here we add two additional parameters to the loss function namely mu^2 and sigma^2
# These parameter terms act as regularizers that penalise large values of mu and sigma.
def neg_log_likelihood_regularized(X, mu, sigma, k=0.2):
"""
k is the regularization factor that controls the weight of the regularization loss
"""
N = X.shape[0]
X_minus_mu = torch.sub(X, mu)
loss_likelihood = torch.mul(0.5 * N, torch.log(2 * np.pi * torch.pow(sigma, 2))) + \
torch.div(torch.matmul(X_minus_mu.T, X_minus_mu), 2 * torch.pow(sigma, 2))
loss_reg = k * (torch.pow(mu, 2) + torch.pow(sigma, 2))
return loss_likelihood + loss_reg
# Let us run the optimiser with the regularise log likelihood function
mu = Variable(torch.Tensor([1]).type(dtype), requires_grad=True)
sigma = Variable(torch.Tensor([1]).type(dtype), requires_grad=True)
iters, losses, mus, sigmas = optimise(X, mu, sigma, loss_fn=neg_log_likelihood_regularized, num_iters=30000, lr=0.001)
plot_mle_fit(iters, mus, sigmas, mu_text_y=180, sigma_text_y=3000)
###Output
_____no_output_____ |
Linked List/0902/143. Reorder List.ipynb | ###Markdown
Description: Given a singly linked list L: L0→L1→…→Ln-1→Ln, reorder it to: L0→Ln→L1→Ln-1→L2→Ln-2→… You may not modify the values in the list's nodes; only the nodes themselves may be rearranged.Example 1: Given 1->2->3->4, reorder it to 1->4->2->3.Example 2: Given 1->2->3->4->5, reorder it to 1->5->2->4->3.
###Code
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def reorderList(self, head: ListNode) -> None:
"""
Do not return anything, modify head in-place instead.
"""
# ----------------------------------------------
        # Save the list nodes in an array (we need the node objects themselves, not their values, to relink them)
arr = []
cur, length = head, 0
while cur:
            arr.append(cur)  # store the node itself so we can rewire .next below
cur = cur.next
length += 1
left = 0
right = length - 1
last = head
while left < right:
arr[left].next = arr[right]
left += 1
if left == right:
last = arr[right]
break
arr[right].next = arr[left]
right -= 1
last = arr[left]
if last:
last.next = None
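# A quick sanity check (not part of the original solution): build the list
# 1->2->3->4 from Example 1, reorder it in place and print the resulting order.
# Expected output: 1 4 2 3
def build_list(vals):
    dummy = ListNode(0)
    cur = dummy
    for v in vals:
        cur.next = ListNode(v)
        cur = cur.next
    return dummy.next

head = build_list([1, 2, 3, 4])
Solution().reorderList(head)
node = head
while node:
    print(node.val, end=' ')
    node = node.next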
###Output
_____no_output_____ |
examples/colab/Training/multi_class_text_classification/NLU_training_multi_class_text_classifier_demo_amazon.ipynb | ###Markdown
[](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/colab/Training/multi_class_text_classification/NLU_training_multi_class_text_classifier_demo_amazon.ipynb) Training a Deep Learning Classifier with NLU ClassifierDL (Multi-class Text Classification) 3 class Amazon Phone review classifier trainingWith the [ClassifierDL model](https://nlp.johnsnowlabs.com/docs/en/annotators#classifierdl-multi-class-text-classification) from Spark NLP you can achieve state-of-the-art results on any multi-class text classification problem. This notebook showcases the following features: - How to train the deep learning classifier- How to store a pipeline to disk- How to load the pipeline from disk (enables NLU offline mode) 1. Install Java 8 and NLU
###Code
import os
from sklearn.metrics import classification_report
! apt-get update -qq > /dev/null
# Install java
! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
! pip install pyspark==2.4.7
! pip install nlu > /dev/null
import nlu
###Output
_____no_output_____
###Markdown
2. Download the Amazon Unlocked Mobile phones dataset from https://www.kaggle.com/PromptCloudHQ/amazon-reviews-unlocked-mobile-phones - a dataset with unlocked mobile phone reviews in 5 review classes
###Code
! wget http://ckl-it.de/wp-content/uploads/2021/01/Amazon_Unlocked_Mobile.csv
import pandas as pd
test_path = '/content/Amazon_Unlocked_Mobile.csv'
train_df = pd.read_csv(test_path,sep=",")
cols = ["y","text"]
train_df = train_df[cols]
train_df
###Output
_____no_output_____
###Markdown
3. Train Deep Learning Classifier using nlu.load('train.classifier')Your dataset label column should be named 'y' and the feature column with text data should be named 'text'
###Code
# load a trainable pipeline by specifying the train. prefix and fit it on a datset with label and text columns
# Since there are no
trainable_pipe = nlu.load('train.classifier')
fitted_pipe = trainable_pipe.fit(train_df.iloc[:50] )
# predict with the trainable pipeline on dataset and get predictions
preds = fitted_pipe.predict(train_df.iloc[:50] )
preds
###Output
tfhub_use download started this may take some time.
Approximate size to download 923.7 MB
[OK!]
###Markdown
Test the fitted pipe on new example
###Code
fitted_pipe.predict("It worked perfectly .")
###Output
_____no_output_____
###Markdown
Configure pipe training parameters
###Code
trainable_pipe.print_info()
###Output
The following parameters are configurable for this NLU pipeline (You can copy paste the examples) :
>>> pipe['classifier_dl'] has settable params:
pipe['classifier_dl'].setMaxEpochs(3) | Info: Maximum number of epochs to train | Currently set to : 3
pipe['classifier_dl'].setLr(0.005) | Info: Learning Rate | Currently set to : 0.005
pipe['classifier_dl'].setBatchSize(64) | Info: Batch size | Currently set to : 64
pipe['classifier_dl'].setDropout(0.5) | Info: Dropout coefficient | Currently set to : 0.5
pipe['classifier_dl'].setEnableOutputLogs(True) | Info: Whether to use stdout in addition to Spark logs. | Currently set to : True
>>> pipe['default_tokenizer'] has settable params:
pipe['default_tokenizer'].setTargetPattern('\S+') | Info: pattern to grab from text as token candidates. Defaults \S+ | Currently set to : \S+
pipe['default_tokenizer'].setContextChars(['.', ',', ';', ':', '!', '?', '*', '-', '(', ')', '"', "'"]) | Info: character list used to separate from token boundaries | Currently set to : ['.', ',', ';', ':', '!', '?', '*', '-', '(', ')', '"', "'"]
pipe['default_tokenizer'].setCaseSensitiveExceptions(True) | Info: Whether to care for case sensitiveness in exceptions | Currently set to : True
pipe['default_tokenizer'].setMinLength(0) | Info: Set the minimum allowed legth for each token | Currently set to : 0
pipe['default_tokenizer'].setMaxLength(99999) | Info: Set the maximum allowed legth for each token | Currently set to : 99999
>>> pipe['default_name'] has settable params:
pipe['default_name'].setDimension(512) | Info: Number of embedding dimensions | Currently set to : 512
pipe['default_name'].setStorageRef('tfhub_use') | Info: unique reference name for identification | Currently set to : tfhub_use
>>> pipe['sentence_detector'] has settable params:
pipe['sentence_detector'].setUseAbbreviations(True) | Info: whether to apply abbreviations at sentence detection | Currently set to : True
pipe['sentence_detector'].setDetectLists(True) | Info: whether detect lists during sentence detection | Currently set to : True
pipe['sentence_detector'].setUseCustomBoundsOnly(False) | Info: Only utilize custom bounds in sentence detection | Currently set to : False
pipe['sentence_detector'].setCustomBounds([]) | Info: characters used to explicitly mark sentence bounds | Currently set to : []
pipe['sentence_detector'].setExplodeSentences(False) | Info: whether to explode each sentence into a different row, for better parallelization. Defaults to false. | Currently set to : False
pipe['sentence_detector'].setMinLength(0) | Info: Set the minimum allowed length for each sentence. | Currently set to : 0
pipe['sentence_detector'].setMaxLength(99999) | Info: Set the maximum allowed length for each sentence | Currently set to : 99999
>>> pipe['document_assembler'] has settable params:
pipe['document_assembler'].setCleanupMode('shrink') | Info: possible values: disabled, inplace, inplace_full, shrink, shrink_full, each, each_full, delete_full | Currently set to : shrink
###Markdown
Retrain with new parameters
###Code
# Train longer!
trainable_pipe['classifier_dl'].setMaxEpochs(5)
fitted_pipe = trainable_pipe.fit(train_df.iloc[:100])
# predict with the trainable pipeline on dataset and get predictions
preds = fitted_pipe.predict(train_df.iloc[:100],output_level='document')
#sentence detector that is part of the pipe generates sone NaNs. lets drop them first
preds.dropna(inplace=True)
print(classification_report(preds['y'], preds['category']))
preds
###Output
precision recall f1-score support
average 0.00 0.00 0.00 29
good 0.65 0.94 0.77 32
poor 0.69 0.95 0.80 39
accuracy 0.67 100
macro avg 0.45 0.63 0.52 100
weighted avg 0.48 0.67 0.56 100
###Markdown
Try training with different Embeddings
###Code
# We can use nlu.print_components(action='embed_sentence') to see every possibler sentence embedding we could use. Lets use bert!
nlu.print_components(action='embed_sentence')
from sklearn.metrics import classification_report
trainable_pipe = nlu.load('en.embed_sentence.small_bert_L12_768 train.classifier')
# We need to train longer and user smaller LR for NON-USE based sentence embeddings usually
# We could tune the hyperparameters further with hyperparameter tuning methods like gridsearch
# Also longer training gives more accuracy
trainable_pipe['classifier_dl'].setMaxEpochs(90)
trainable_pipe['classifier_dl'].setLr(0.0005)
fitted_pipe = trainable_pipe.fit(train_df)
# predict with the trainable pipeline on dataset and get predictions
preds = fitted_pipe.predict(train_df,output_level='document')
#sentence detector that is part of the pipe generates sone NaNs. lets drop them first
preds.dropna(inplace=True)
print(classification_report(preds['y'], preds['category']))
#preds
###Output
sent_small_bert_L12_768 download started this may take some time.
Approximate size to download 392.9 MB
[OK!]
precision recall f1-score support
average 0.72 0.67 0.69 500
good 0.85 0.87 0.86 500
poor 0.78 0.83 0.80 500
accuracy 0.79 1500
macro avg 0.78 0.79 0.79 1500
weighted avg 0.78 0.79 0.79 1500
###Markdown
5. Let's save the model
###Code
stored_model_path = './models/classifier_dl_trained'
fitted_pipe.save(stored_model_path)
###Output
Stored model in ./models/classifier_dl_trained
###Markdown
6. Let's load the model from HDD.This makes offline NLU usage possible! You need to call nlu.load(path=path_to_the_pipe) to load a model/pipeline from disk.
###Code
hdd_pipe = nlu.load(path=stored_model_path)
preds = hdd_pipe.predict('It worked perfectly.')
preds
hdd_pipe.print_info()
###Output
The following parameters are configurable for this NLU pipeline (You can copy paste the examples) :
>>> pipe['document_assembler'] has settable params:
pipe['document_assembler'].setCleanupMode('shrink') | Info: possible values: disabled, inplace, inplace_full, shrink, shrink_full, each, each_full, delete_full | Currently set to : shrink
>>> pipe['sentence_detector'] has settable params:
pipe['sentence_detector'].setCustomBounds([]) | Info: characters used to explicitly mark sentence bounds | Currently set to : []
pipe['sentence_detector'].setDetectLists(True) | Info: whether detect lists during sentence detection | Currently set to : True
pipe['sentence_detector'].setExplodeSentences(False) | Info: whether to explode each sentence into a different row, for better parallelization. Defaults to false. | Currently set to : False
pipe['sentence_detector'].setMaxLength(99999) | Info: Set the maximum allowed length for each sentence | Currently set to : 99999
pipe['sentence_detector'].setMinLength(0) | Info: Set the minimum allowed length for each sentence. | Currently set to : 0
pipe['sentence_detector'].setUseAbbreviations(True) | Info: whether to apply abbreviations at sentence detection | Currently set to : True
pipe['sentence_detector'].setUseCustomBoundsOnly(False) | Info: Only utilize custom bounds in sentence detection | Currently set to : False
>>> pipe['regex_tokenizer'] has settable params:
pipe['regex_tokenizer'].setCaseSensitiveExceptions(True) | Info: Whether to care for case sensitiveness in exceptions | Currently set to : True
pipe['regex_tokenizer'].setTargetPattern('\S+') | Info: pattern to grab from text as token candidates. Defaults \S+ | Currently set to : \S+
pipe['regex_tokenizer'].setMaxLength(99999) | Info: Set the maximum allowed length for each token | Currently set to : 99999
pipe['regex_tokenizer'].setMinLength(0) | Info: Set the minimum allowed length for each token | Currently set to : 0
>>> pipe['glove'] has settable params:
pipe['glove'].setBatchSize(32) | Info: Batch size. Large values allows faster processing but requires more memory. | Currently set to : 32
pipe['glove'].setCaseSensitive(False) | Info: whether to ignore case in tokens for embeddings matching | Currently set to : False
pipe['glove'].setDimension(768) | Info: Number of embedding dimensions | Currently set to : 768
pipe['glove'].setMaxSentenceLength(128) | Info: Max sentence length to process | Currently set to : 128
pipe['glove'].setIsLong(False) | Info: Use Long type instead of Int type for inputs buffer - Some Bert models require Long instead of Int. | Currently set to : False
pipe['glove'].setStorageRef('sent_small_bert_L12_768') | Info: unique reference name for identification | Currently set to : sent_small_bert_L12_768
>>> pipe['classifier_dl'] has settable params:
pipe['classifier_dl'].setClasses(['average', 'poor', 'good']) | Info: get the tags used to trained this NerDLModel | Currently set to : ['average', 'poor', 'good']
pipe['classifier_dl'].setStorageRef('sent_small_bert_L12_768') | Info: unique reference name for identification | Currently set to : sent_small_bert_L12_768
###Markdown
[](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/colab/Training/multi_class_text_classification/NLU_training_multi_class_text_classifier_demo_amazon.ipynb) Training a Deep Learning Classifier with NLU ClassifierDL (Multi-class Text Classification) 3 class Amazon Phone review classifier trainingWith the [ClassifierDL model](https://nlp.johnsnowlabs.com/docs/en/annotators#classifierdl-multi-class-text-classification) from Spark NLP you can achieve state-of-the-art results on any multi-class text classification problem. This notebook showcases the following features: - How to train the deep learning classifier- How to store a pipeline to disk- How to load the pipeline from disk (enables NLU offline mode)You can achieve the results reported below (or even better) on this dataset, for both the training and the test data. 1. Install Java 8 and NLU
###Code
import os
from sklearn.metrics import classification_report
! apt-get update -qq > /dev/null
# Install java
! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
! pip install pyspark==2.4.7
! pip install nlu > /dev/null
import nlu
###Output
Collecting pyspark==2.4.7
[?25l Downloading https://files.pythonhosted.org/packages/e2/06/29f80e5a464033432eedf89924e7aa6ebbc47ce4dcd956853a73627f2c07/pyspark-2.4.7.tar.gz (217.9MB)
[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ| 217.9MB 67kB/s
[?25hCollecting py4j==0.10.7
[?25l Downloading https://files.pythonhosted.org/packages/e3/53/c737818eb9a7dc32a7cd4f1396e787bd94200c3997c72c1dbe028587bd76/py4j-0.10.7-py2.py3-none-any.whl (197kB)
[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ| 204kB 17.7MB/s
[?25hBuilding wheels for collected packages: pyspark
Building wheel for pyspark (setup.py) ... [?25l[?25hdone
Created wheel for pyspark: filename=pyspark-2.4.7-py2.py3-none-any.whl size=218279465 sha256=90dbb9e58f0f2c2d84f268ee8dd9f2f6334927c1a225a2bb63a9208d3133b1ae
Stored in directory: /root/.cache/pip/wheels/34/1f/2e/1e7460f80acf26b08dbb8c53d7ff9e07146f2a68dd5c732be5
Successfully built pyspark
Installing collected packages: py4j, pyspark
Successfully installed py4j-0.10.7 pyspark-2.4.7
###Markdown
2. Download the Amazon Unlocked Mobile phones dataset from https://www.kaggle.com/PromptCloudHQ/amazon-reviews-unlocked-mobile-phones - a dataset with unlocked mobile phone reviews in 5 review classes
###Code
! wget http://ckl-it.de/wp-content/uploads/2021/01/Amazon_Unlocked_Mobile.csv
import pandas as pd
test_path = '/content/Amazon_Unlocked_Mobile.csv'
train_df = pd.read_csv(test_path,sep=",")
cols = ["y","text"]
train_df = train_df[cols]
from sklearn.model_selection import train_test_split
train_df, test_df = train_test_split(train_df, test_size=0.2)
train_df
###Output
_____no_output_____
###Markdown
3. Train Deep Learning Classifier using nlu.load('train.classifier')Your dataset label column should be named 'y' and the feature column with text data should be named 'text'
###Code
# load a trainable pipeline by specifying the train. prefix and fit it on a datset with label and text columns
# Since there are no
trainable_pipe = nlu.load('train.classifier')
fitted_pipe = trainable_pipe.fit(train_df.iloc[:50] )
# predict with the trainable pipeline on dataset and get predictions
preds = fitted_pipe.predict(train_df.iloc[:50] )
preds
###Output
tfhub_use download started this may take some time.
Approximate size to download 923.7 MB
[OK!]
###Markdown
4. Test the fitted pipe on new example
###Code
fitted_pipe.predict("It worked perfectly .")
###Output
_____no_output_____
###Markdown
5. Configure pipe training parameters
###Code
trainable_pipe.print_info()
###Output
The following parameters are configurable for this NLU pipeline (You can copy paste the examples) :
>>> pipe['classifier_dl'] has settable params:
pipe['classifier_dl'].setMaxEpochs(3) | Info: Maximum number of epochs to train | Currently set to : 3
pipe['classifier_dl'].setLr(0.005) | Info: Learning Rate | Currently set to : 0.005
pipe['classifier_dl'].setBatchSize(64) | Info: Batch size | Currently set to : 64
pipe['classifier_dl'].setDropout(0.5) | Info: Dropout coefficient | Currently set to : 0.5
pipe['classifier_dl'].setEnableOutputLogs(True) | Info: Whether to use stdout in addition to Spark logs. | Currently set to : True
>>> pipe['default_tokenizer'] has settable params:
pipe['default_tokenizer'].setTargetPattern('\S+') | Info: pattern to grab from text as token candidates. Defaults \S+ | Currently set to : \S+
pipe['default_tokenizer'].setContextChars(['.', ',', ';', ':', '!', '?', '*', '-', '(', ')', '"', "'"]) | Info: character list used to separate from token boundaries | Currently set to : ['.', ',', ';', ':', '!', '?', '*', '-', '(', ')', '"', "'"]
pipe['default_tokenizer'].setCaseSensitiveExceptions(True) | Info: Whether to care for case sensitiveness in exceptions | Currently set to : True
pipe['default_tokenizer'].setMinLength(0) | Info: Set the minimum allowed legth for each token | Currently set to : 0
pipe['default_tokenizer'].setMaxLength(99999) | Info: Set the maximum allowed legth for each token | Currently set to : 99999
>>> pipe['sentence_detector'] has settable params:
pipe['sentence_detector'].setUseAbbreviations(True) | Info: whether to apply abbreviations at sentence detection | Currently set to : True
pipe['sentence_detector'].setDetectLists(True) | Info: whether detect lists during sentence detection | Currently set to : True
pipe['sentence_detector'].setUseCustomBoundsOnly(False) | Info: Only utilize custom bounds in sentence detection | Currently set to : False
pipe['sentence_detector'].setCustomBounds([]) | Info: characters used to explicitly mark sentence bounds | Currently set to : []
pipe['sentence_detector'].setExplodeSentences(False) | Info: whether to explode each sentence into a different row, for better parallelization. Defaults to false. | Currently set to : False
pipe['sentence_detector'].setMinLength(0) | Info: Set the minimum allowed length for each sentence. | Currently set to : 0
pipe['sentence_detector'].setMaxLength(99999) | Info: Set the maximum allowed length for each sentence | Currently set to : 99999
>>> pipe['default_name'] has settable params:
pipe['default_name'].setDimension(512) | Info: Number of embedding dimensions | Currently set to : 512
pipe['default_name'].setLoadSP(False) | Info: Whether to load SentencePiece ops file which is required only by multi-lingual models. This is not changeable after it's set with a pretrained model nor it is compatible with Windows. | Currently set to : False
pipe['default_name'].setStorageRef('tfhub_use') | Info: unique reference name for identification | Currently set to : tfhub_use
>>> pipe['document_assembler'] has settable params:
pipe['document_assembler'].setCleanupMode('shrink') | Info: possible values: disabled, inplace, inplace_full, shrink, shrink_full, each, each_full, delete_full | Currently set to : shrink
###Markdown
6. Retrain with new parameters
###Code
# Train longer!
trainable_pipe['classifier_dl'].setMaxEpochs(5)
fitted_pipe = trainable_pipe.fit(train_df.iloc[:100])
# predict with the trainable pipeline on dataset and get predictions
preds = fitted_pipe.predict(train_df.iloc[:100],output_level='document')
#sentence detector that is part of the pipe generates sone NaNs. lets drop them first
preds.dropna(inplace=True)
print(classification_report(preds['y'], preds['category']))
preds
###Output
precision recall f1-score support
average 0.43 0.90 0.58 29
good 0.80 0.84 0.82 38
poor 0.00 0.00 0.00 33
accuracy 0.58 100
macro avg 0.41 0.58 0.47 100
weighted avg 0.43 0.58 0.48 100
###Markdown
7. Try training with different Embeddings
###Code
# We can use nlu.print_components(action='embed_sentence') to see every possibler sentence embedding we could use. Lets use bert!
nlu.print_components(action='embed_sentence')
from sklearn.metrics import classification_report
trainable_pipe = nlu.load('en.embed_sentence.small_bert_L12_768 train.classifier')
# We need to train longer and user smaller LR for NON-USE based sentence embeddings usually
# We could tune the hyperparameters further with hyperparameter tuning methods like gridsearch
# Also longer training gives more accuracy
trainable_pipe['classifier_dl'].setMaxEpochs(90)
trainable_pipe['classifier_dl'].setLr(0.0005)
fitted_pipe = trainable_pipe.fit(train_df)
# predict with the trainable pipeline on dataset and get predictions
preds = fitted_pipe.predict(train_df,output_level='document')
#sentence detector that is part of the pipe generates sone NaNs. lets drop them first
preds.dropna(inplace=True)
print(classification_report(preds['y'], preds['category']))
#preds
###Output
sent_small_bert_L12_768 download started this may take some time.
Approximate size to download 392.9 MB
[OK!]
precision recall f1-score support
average 0.73 0.66 0.69 392
good 0.84 0.87 0.86 408
poor 0.79 0.83 0.81 400
accuracy 0.79 1200
macro avg 0.79 0.79 0.79 1200
weighted avg 0.79 0.79 0.79 1200
###Markdown
7.1 evaluate on Test Data
###Code
preds = fitted_pipe.predict(test_df,output_level='document')
#sentence detector that is part of the pipe generates sone NaNs. lets drop them first
preds.dropna(inplace=True)
print(classification_report(preds['y'], preds['category']))
###Output
precision recall f1-score support
average 0.70 0.66 0.68 108
good 0.79 0.82 0.80 92
poor 0.75 0.77 0.76 100
accuracy 0.74 300
macro avg 0.74 0.75 0.75 300
weighted avg 0.74 0.74 0.74 300
###Markdown
8. Let's save the model
###Code
stored_model_path = './models/classifier_dl_trained'
fitted_pipe.save(stored_model_path)
###Output
Stored model in ./models/classifier_dl_trained
###Markdown
9. Let's load the model from HDD.This makes offline NLU usage possible! You need to call nlu.load(path=path_to_the_pipe) to load a model/pipeline from disk.
###Code
hdd_pipe = nlu.load(path=stored_model_path)
preds = hdd_pipe.predict('It worked perfectly.')
preds
hdd_pipe.print_info()
###Output
The following parameters are configurable for this NLU pipeline (You can copy paste the examples) :
>>> pipe['document_assembler'] has settable params:
pipe['document_assembler'].setCleanupMode('shrink') | Info: possible values: disabled, inplace, inplace_full, shrink, shrink_full, each, each_full, delete_full | Currently set to : shrink
>>> pipe['sentence_detector'] has settable params:
pipe['sentence_detector'].setCustomBounds([]) | Info: characters used to explicitly mark sentence bounds | Currently set to : []
pipe['sentence_detector'].setDetectLists(True) | Info: whether detect lists during sentence detection | Currently set to : True
pipe['sentence_detector'].setExplodeSentences(False) | Info: whether to explode each sentence into a different row, for better parallelization. Defaults to false. | Currently set to : False
pipe['sentence_detector'].setMaxLength(99999) | Info: Set the maximum allowed length for each sentence | Currently set to : 99999
pipe['sentence_detector'].setMinLength(0) | Info: Set the minimum allowed length for each sentence. | Currently set to : 0
pipe['sentence_detector'].setUseAbbreviations(True) | Info: whether to apply abbreviations at sentence detection | Currently set to : True
pipe['sentence_detector'].setUseCustomBoundsOnly(False) | Info: Only utilize custom bounds in sentence detection | Currently set to : False
>>> pipe['regex_tokenizer'] has settable params:
pipe['regex_tokenizer'].setCaseSensitiveExceptions(True) | Info: Whether to care for case sensitiveness in exceptions | Currently set to : True
pipe['regex_tokenizer'].setTargetPattern('\S+') | Info: pattern to grab from text as token candidates. Defaults \S+ | Currently set to : \S+
pipe['regex_tokenizer'].setMaxLength(99999) | Info: Set the maximum allowed length for each token | Currently set to : 99999
pipe['regex_tokenizer'].setMinLength(0) | Info: Set the minimum allowed length for each token | Currently set to : 0
>>> pipe['glove'] has settable params:
pipe['glove'].setBatchSize(32) | Info: Batch size. Large values allows faster processing but requires more memory. | Currently set to : 32
pipe['glove'].setCaseSensitive(False) | Info: whether to ignore case in tokens for embeddings matching | Currently set to : False
pipe['glove'].setDimension(768) | Info: Number of embedding dimensions | Currently set to : 768
pipe['glove'].setMaxSentenceLength(128) | Info: Max sentence length to process | Currently set to : 128
pipe['glove'].setIsLong(False) | Info: Use Long type instead of Int type for inputs buffer - Some Bert models require Long instead of Int. | Currently set to : False
pipe['glove'].setStorageRef('sent_small_bert_L12_768') | Info: unique reference name for identification | Currently set to : sent_small_bert_L12_768
>>> pipe['classifier_dl'] has settable params:
pipe['classifier_dl'].setClasses(['average', 'poor', 'good']) | Info: get the tags used to trained this NerDLModel | Currently set to : ['average', 'poor', 'good']
pipe['classifier_dl'].setStorageRef('sent_small_bert_L12_768') | Info: unique reference name for identification | Currently set to : sent_small_bert_L12_768
|
.ipynb_checkpoints/Project Document-checkpoint.ipynb | ###Markdown
Data Intelligence Application Project IntroductionThe goal of the project is to find the best joint bidding and pricing strategy to attract more users to an e-commerce website. In this way we can sell more items and also try to build loyalty among the users after the first purchase. First of all we define the scenario, the product we want to sell and the idea behind the choices we make. Product to sellThe product that we want to sell is an anti-wrinkle cream. We chose this product because nowadays people in general take more care of their appearance and more specifically of their skin. A cream is a product for everyone, easy to sell and to advertise, that can be manufactured at a low price and sold at a higher price if well advertised. A cream is also a product that can be bought with a fixed frequency (monthly in our case) and, if a person thinks that the product is good, he or she is going to keep buying it over time. User Features and ClassesWe have identified 3 classes of potential buyers characterized by 2 principal features. Features* *Gender*: * Male * Female* *Age*: * Young, before 30 y/o * Adult, after 30 y/o Classes:| | Female | Male ||-------|:----------:|--------------:|| **Young** | x | || **Adult** | x | x | * **Male-Adult (C1)**: This is the class least interested in buying the product, but we think there are many potential buyers in it; they are not willing to pay much for a cream* **Female-Adult (C2)**: This is the most interested class; they are willing to spend much for the product, first of all because they have the economic availability and because a cheaper product may be interpreted as a bad-quality product* **Female-Young (C3)**: This class does not have high economic capabilities, so the price needs to be lower than the price for Class 2, also because they usually don't care much about the quality Environment Conversion RateFor the conversion rate we have defined 10 points for each class and, using a quintic regression, we have estimated a function for each one
###Code
import matplotlib.pyplot as plt
import numpy as np
import math
terms1 = [
2.7201569788151064e-001,
1.0492858071043640e-001,
-7.7219072733113589e-003,
-7.8440033424782515e-005,
1.5070734117821789e-005,
-2.6951793857949948e-007
]
def regressC1(x):
t = 1
r = 0
for c in terms1:
r += c * t
t *= x
return r
terms2 = [
2.8737573993069027e-001,
5.9510040146659365e-002,
-3.3835350515498425e-003,
3.3437236649835021e-004,
-2.1899899222428644e-005,
4.0670620526356083e-007
]
def regressC2(x):
t = 1
r = 0
for c in terms2:
r += c * t
t *= x
return r
terms3 = [
3.3159910846623597e-001,
6.2264260733061391e-002,
-1.0506786208669700e-003,
-5.9692898283301293e-004,
3.3636918621988781e-005,
-5.0878883129873533e-007
]
def regressC3(x):
t = 1
r = 0
for c in terms3:
r += c * t
t *= x
return r
prices=np.intc([2 ,5,7,9, 10,12, 15, 20, 25, 30 ])
fig = plt.figure()
plt.plot(prices,regressC1(prices), label="Male-Adult")
plt.plot(prices,regressC2(prices), label="Female-Adult")
plt.plot(prices,regressC3(prices), label="Female-Young")
plt.title("Conversion Rates")
plt.xlabel("Price (โฌ)")
plt.ylabel("Conversion Rate")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Cost Per ClickThe cost of a single click depends on the bid; we thought that a reasonable model is a random variable that follows a uniform distribution with a = bid - 0.05*bid and b = bid + 0.05*bid
###Code
def costPerClick(bid):
s= np.random.uniform(bid-0.05*bid, bid+0.05*bid,1)
return(s)
###Output
_____no_output_____
###Markdown
Number of clicksThe number of clicks also depends on the bid, and we thought that a reasonable function to simulate the increasing behavior is the tanh function. We cap the maximum number of daily clicks at 500.The randomness is added through a random variable that follows a uniform distribution whose support is $x\in[(1-0.05)\cdot500\cdot\tanh(bid),(1+0.05)\cdot500\cdot\tanh(bid)]$
###Code
def nrDailyClick(bid):
meanNrClick = math.trunc(500*math.tanh(bid))
nrClick= math.trunc(np.random.uniform(meanNrClick-0.05*meanNrClick, meanNrClick+0.05*meanNrClick,1)[0])
return nrClick
bids= np.linspace(0,3.0, 100)
fig = plt.figure()
for bid in bids:
plt.scatter(bid,nrDailyClick(bid), color="blue")
plt.title("Nr click function")
plt.xlabel("Bid (โฌ)")
plt.ylabel("Nr Daily Click")
plt.show()
###Output
_____no_output_____
###Markdown
Distribution of the probability over the number of times the user will come backThe probability that the user will come back to the e-commerce website to buy the item within 30 days after a purchase is simulated through the function $p(month)=\frac{month}{month+2}$, where $month$ is the month we are referring to.This comes from the fact that the first time the user buys the product we have a probability of about 0.33 that the user comes back, because we don't know whether he likes the product and during this period he may find another product with a better price.The more months in a row the user buys the product, the higher the probability that he will buy it next time. This is because he is loyal to the product and less prone to switch to another one, independently of the price
###Code
def nextTimeProbability(month):
return(month/(2+month))
months=np.linspace(1,12,12)
fig = plt.figure()
for month in months:
plt.scatter(month,nextTimeProbability(month), color="blue")
plt.title("Next time probability")
plt.xlabel("Month")
plt.ylabel("Probability")
plt.show()
###Output
_____no_output_____
###Markdown
Step 1 The goal of the first step is to formulate the objective function, under the assumption that once a user purchases an item at a certain price, the e-commerce will propose the same price on future visits of the same user and this user will buy the product for sure. * $i$ is the class* $j$ is the price of the product* $p_{ij}$ is the probability that class $i$ will buy the product at a given price $j$ (conversion rate)* $c$ is the fixed cost* $x$ is the bid* $v(x)$ is the stochastic cost per click depending on the bid* $t$ is the time frame we consider.* $n(x)$ is the stochastic number of clicks depending on the bid $x$.* $\varphi$ is the number of times a customer will buy the product againThe goal is to maximize the profit, which yields the following model\begin{equation*} \max \sum_{t=0}^{T} \sum_{i=1}^{3}\bigl(\varphi \cdot p_{ij}(j-c)-v(x)\bigr)\cdot n_{i,t}(x)\end{equation*}The joint pricing/bidding algorithm is then as follows:For every class $i \in I$For every possible bid $x \in X$For every possible price $j \in P$\begin{equation*} j,x = \arg\max\bigl(\varphi \cdot p_{ij}(j-c)-v(x)\bigr)\,n_{i,t}(x)\end{equation*}the complexity is in $O(|I|\cdot|X|\cdot|P|)$ Step 2In step 2 we need to consider the online learning version of the above optimization problem when the parameters are not known.The random variables which we don't know a priori are:* Number of daily clicks* Cost per click* Conversion rateThe model for each of these random variables is explained in the Environment section. Potential delay in the feedbackThe potential delay in the feedback is given by the fact that once a user clicks on the ad he may need several days to decide whether to complete the purchase or not, so we need to keep track of the single user, for example using cookies. In this way we can also estimate the number of users that clicked the ad but did not make the purchase, and use this information in the future to update the estimates. Step 3In the third step we need to consider the case in which the bid is fixed and we try to learn in an online fashion the best pricing strategy when there is no distinction among the classes. We also assume that the number of daily clicks and the daily cost per click are known.To do that we first of all take the average of the three conversion rates of the three classes.We fix the bid and the cost per click at 1.0, while the number of daily clicks is set to 380.Every day we pull an arm that represents a price. The experiment lasts 365 days.For every click, we play a round and we get a reward that can be 0 (the user didn't buy the product) or 1 (the user bought the product). If it is 0 we are losing money because we have to pay for the click, otherwise we increment the daily reward.The cumulative reward is calculated as follows:
###Code
cumRewardTS+=reward*(prezzi[pulled_armTS]/max_prezzo)
cumRewardUCB1+=reward*(prezzi[pulled_armUCB1]/max_prezzo)
###Output
_____no_output_____
###Markdown
This is done because we need to normalize the reward to give more importance to a higher price with respect to a lower price. We cannot simply consider the reward itself, because the lower prices provide more cumulative reward in absolute terms but may provide less revenue: it is better to sell 1 cream at 20 euro than 5 creams at 3 euro. At the end of the day we update the distributions associated to the arms.
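As a small numerical illustration (with made-up numbers, assuming for the sake of the example a maximum price of 20 euro): selling 5 creams at 3 euro gives a raw reward of 5 but a normalised reward of $5\cdot\frac{3}{20}=0.75$, while selling 1 cream at 20 euro gives a raw reward of only 1 but a normalised reward of $\frac{20}{20}=1.0$. The normalised cumulative reward is therefore proportional to the revenue, which is what we actually want to maximise, and it is this normalised quantity (averaged over the daily clicks) that is passed to the update in the next cell.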
###Code
ts_learner.update(pulled_armTS,cumRewardTS/nrClick)
ucb1_learner.update(pulled_armUCB1, cumRewardUCB1/nrClick)
###Output
_____no_output_____
###Markdown
As we expected the final revenue provided by the TS is higher than the revenue provided by UCB1. And we have also theoretical guarantee that the regret goes to 0 faster in TS  Step 4In the 4th step we make class differentiation.For doing that we have defined a single TS learner. Every day we pull an arm that is the price that is presented to every user indipendently of the class he belongs to.We assume that the number of click are equally distributed among the three classes.In this step when a user see the ad he make a choise accordingly to the specific class probability, this is the big change with respect the step 3. For experiment purpose we simulate also the fact that we can make discrimination at advertise level, this means that user belonging to a different class see different prices. To do this we define three different learners, one for each class assuming that the number of daily click are equally distributed
###Code
for t in range (0,T):
#pull TS arms
pulled_armC1=ts_learnerClass1.pull_arm()
pulled_armC2=ts_learnerClass2.pull_arm()
    pulled_armC3=ts_learnerClass3.pull_arm()
pulled_armSingle=ts_learnerSingle.pull_arm()
    #I pull a new arm every day, and I can distinguish the three different
    #classes, proposing a different price to each of them
#Class 1
for x in range(0, nrClickPerClass):
#Getting reward from his best arm
        reward= envClass1.round(pulled_armC1) # compute the reward
cumRewardTSC1+=reward*(prezzi[pulled_armC1]/max_prezzo)
dailyTS+=reward*prezzi[pulled_armC1]-costPerClick
#using single learner
reward= envClass1.round(pulled_armSingle)
dailyTSSingle+= reward*prezzi[pulled_armSingle]-costPerClick
cumRewardSingle+=reward*(prezzi[pulled_armSingle]/max_prezzo)
#Class 2
for x in range(0, nrClickPerClass):
#Getting reward from his best arm
        reward= envClass2.round(pulled_armC2) # compute the reward
cumRewardTSC2+=reward*(prezzi[pulled_armC2]/max_prezzo)
dailyTS+=reward*prezzi[pulled_armC2]-costPerClick
#using single learner
reward= envClass2.round(pulled_armSingle)
dailyTSSingle+= reward*prezzi[pulled_armSingle]-costPerClick
cumRewardSingle+=reward*(prezzi[pulled_armSingle]/max_prezzo)
#Class 3
for x in range(0, nrClickPerClass):
#Getting reward from his best arm
        reward= envClass3.round(pulled_armC3) # compute the reward
cumRewardTSC3+=reward*(prezzi[pulled_armC3]/max_prezzo)
dailyTS+=reward*prezzi[pulled_armC3]-costPerClick
#using single learner
reward= envClass3.round(pulled_armSingle)
dailyTSSingle+= reward*prezzi[pulled_armSingle]-costPerClick
cumRewardSingle+=reward*(prezzi[pulled_armSingle]/max_prezzo)
#make the average of the cumulative reward
totalRevenueTS.append(dailyTS)
totalRevenueSingle.append(dailyTSSingle)
ts_learnerClass1.update(pulled_armC1,cumRewardTSC1/nrClickPerClass)
ts_learnerClass2.update(pulled_armC2,cumRewardTSC2/nrClickPerClass)
ts_learnerClass3.update(pulled_armC3,cumRewardTSC3/nrClickPerClass)
ts_learnerSingle.update(pulled_armSingle, cumRewardSingle/(3*nrClickPerClass))
###Output
_____no_output_____ |
01 - Statistics & Probability/Statistics Notes/ipynb/01 Statistics Visualizing Information.ipynb | ###Markdown
Visualizing Information
###Code
# importing required libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('fivethirtyeight')
###Output
_____no_output_____
###Markdown
Analysing "Data Flick's solutions private limited"importing data
###Code
no_units = pd.read_excel('./data/df_genre_units.xlsx') # reading data
no_units = no_units.set_index('Genre')
no_units
plt.figure(figsize=(6,6))
no_units['Units sold'].plot.pie(autopct='%.0f%%')
plt.show()
###Output
_____no_output_____
###Markdown
**Visualizing satisfied customers**
###Code
satisfied = pd.read_excel('./data/statistics/df_statisfied.xlsx') # reading data
satisfied = satisfied.set_index('Genre')
satisfied
plt.figure(figsize=(6,6))
satisfied['Satisfied'].plot.pie(autopct='%.0f%%')
###Output
_____no_output_____
###Markdown
**Bar plot **> vertical plot
###Code
plt.figure(figsize=(10,5))
satisfied['Satisfied'].plot.bar()
plt.ylabel('Satisfied customers in %')
plt.show()
###Output
_____no_output_____
###Markdown
> horizontal plot
###Code
plt.figure(figsize=(10,5))
satisfied['Satisfied'].plot.barh()
plt.xlabel('Satisfied customers in %')
plt.show()
satisfied['unsatisfied'] = 100-satisfied['Satisfied']
satisfied
###Output
_____no_output_____
###Markdown
Split Category bar plot> Vertical
###Code
plt.figure(figsize=(10,5))
satisfied.plot.bar()
plt.ylabel('Satisfied customers in %')
plt.show()
###Output
_____no_output_____
###Markdown
> Horizontal
###Code
plt.figure(figsize=(15,7))
satisfied.plot.barh()
plt.xlabel('Satisfied customers in %')
plt.show()
###Output
_____no_output_____
###Markdown
**Stacked bar plot**> Vertical
###Code
plt.figure(figsize=(10,5))
satisfied.plot.bar(stacked = True)
plt.ylabel('Satisfied customers in %')
plt.show()
plt.figure(figsize=(10,5))
satisfied.plot.barh(stacked = True)
plt.xlabel('Satisfied customers in %')
plt.show()
###Output
_____no_output_____
###Markdown
**Exercise** The CEO needs another chart for the keynote presentation. Here's the data (loaded from the Excel files in the next cell); see if you can sketch the bar chart
###Code
sales_unit = pd.read_excel('./data/statistics/df_salesunit.xlsx')
sales = pd.read_excel('./data/statistics/df_sales.xlsx')
sales_unit = sales_unit.set_index('Continent ')
sales_unit
sales = sales.set_index('Genre')
sales
###Output
_____no_output_____
###Markdown
**Charts**
###Code
sales_unit['Sales (unit)'].plot.pie()
plt.title('Pie chart')
# bar chart
sales_unit.plot.bar()
plt.title('bar chart')
# bar chart
sales_unit.plot.barh()
plt.title('horizontal bar chart')
#plt.show()
###Output
_____no_output_____
###Markdown
**Sales**
###Code
sales.keys()
%matplotlib inline
# bar chart
sales.plot.bar()
plt.title('bar chart')
# bar chart
sales.plot.barh()
plt.title('horizontal bar chart')
#plt.show()
###Output
_____no_output_____
###Markdown
**Exercise - 2**
###Code
score = pd.read_excel('./data/statistics/df_scores.xlsx')
score.keys()
score = score.set_index('Scores ')
score
###Output
_____no_output_____
###Markdown
**Charts**
###Code
score['Frequency'].plot.pie()
plt.title('Pie chart')
# bar chart
score.plot.bar()
plt.title('bar chart')
# bar chart
score.plot.barh()
plt.title('horizontal bar chart')
#plt.show()
###Output
_____no_output_____ |
MLCCNumpy_Exercises.ipynb | ###Markdown
Numpy Exercises1) Create a uniform subdivision of the interval -1.3 to 2.5 with 64 subdivisions
###Code
import numpy as np #import numpy
a = np.linspace(-1.3, 2.5, num = 64)
print (a)
###Output
[-1.3 -1.23968254 -1.17936508 -1.11904762 -1.05873016 -0.9984127
-0.93809524 -0.87777778 -0.81746032 -0.75714286 -0.6968254 -0.63650794
-0.57619048 -0.51587302 -0.45555556 -0.3952381 -0.33492063 -0.27460317
-0.21428571 -0.15396825 -0.09365079 -0.03333333 0.02698413 0.08730159
0.14761905 0.20793651 0.26825397 0.32857143 0.38888889 0.44920635
0.50952381 0.56984127 0.63015873 0.69047619 0.75079365 0.81111111
0.87142857 0.93174603 0.99206349 1.05238095 1.11269841 1.17301587
1.23333333 1.29365079 1.35396825 1.41428571 1.47460317 1.53492063
1.5952381 1.65555556 1.71587302 1.77619048 1.83650794 1.8968254
1.95714286 2.01746032 2.07777778 2.13809524 2.1984127 2.25873016
2.31904762 2.37936508 2.43968254 2.5 ]
###Markdown
2) Generate an array of length 3n filled with the cyclic pattern 1, 2, 3 `np.resize(array_name, num)` returns an array of total length 'num', filling it by repeating 'array_name' cyclically
###Code
n = int(input())
b = np.array([1,2,3])
b_modified = np.resize(b, 3*n)
print (b_modified)
###Output
4
[1 2 3 1 2 3 1 2 3 1 2 3]
###Markdown
3) Create an array of the first 10 odd integers. `np.arange(start, stop, step)` takes an optional argument which increments start by that step and ends just before stop
###Code
c = np.arange(1,20,2)
print (c)
###Output
[ 1 3 5 7 9 11 13 15 17 19]
###Markdown
4) Find intersection of a and b `np.intersect1d(a,b)` gives the unique and common values between two arrays a and b
###Code
#expected output array([2, 4])
a = np.array([1,2,3,2,3,4,3,4,5,6])
b = np.array([7,2,10,2,7,4,9,4,9,8])
res = np.intersect1d(a, b)
print (res)
###Output
[2 4]
###Markdown
5) Reshape 1d array a to 2d array of 2X5 np.reshape(array_name, (shape)) takes two arguments - an array to which the operation is to be performed and the shape to which the array will be shaped
###Code
a = np.arange(10)
a_modified = np.reshape(a,(2,5))
print (a_modified)
###Output
[[0 1 2 3 4]
[5 6 7 8 9]]
###Markdown
6) Convert a list to a numpy array and vice versa np.asarray(list_argument) converts a list to a numpy array
###Code
#Convert list to numpy array
list1 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
array1 = np.asarray(list1)
print (array1)
print (type(array1))
###Output
[1 2 3 4 5 6 7 8 9]
<class 'numpy.ndarray'>
###Markdown
np.ndarray.tolist(array_argument) converts a numpy array to a list
###Code
#Convert numpy array to list
list2 = np.ndarray.tolist(array1)
print (list2)
print (type(list2))
###Output
[1, 2, 3, 4, 5, 6, 7, 8, 9]
<class 'list'>
###Markdown
7) Create a 10 x 10 array of zeros and then "frame" it with a border of ones. np.pad(array_name, pad_width = depth_of_the_border, mode='constant', constant_values = fill_value) gives a border or a pad around an array of depth equal to pad_width filled with values equal to constant_values
###Code
a = np.zeros(shape=(10,10), dtype='int')
a = np.pad(a, pad_width=1, mode='constant', constant_values=1)
print (a)
###Output
[[1 1 1 1 1 1 1 1 1 1 1 1]
[1 0 0 0 0 0 0 0 0 0 0 1]
[1 0 0 0 0 0 0 0 0 0 0 1]
[1 0 0 0 0 0 0 0 0 0 0 1]
[1 0 0 0 0 0 0 0 0 0 0 1]
[1 0 0 0 0 0 0 0 0 0 0 1]
[1 0 0 0 0 0 0 0 0 0 0 1]
[1 0 0 0 0 0 0 0 0 0 0 1]
[1 0 0 0 0 0 0 0 0 0 0 1]
[1 0 0 0 0 0 0 0 0 0 0 1]
[1 0 0 0 0 0 0 0 0 0 0 1]
[1 1 1 1 1 1 1 1 1 1 1 1]]
###Markdown
8) Create an 8 x 8 array with a checkerboard pattern of zeros and ones using a slicing+striding approach. a[start:stop] denotes a slice of the array a; it can take a third optional argument which denotes the step with which elements are chosen from the array. a[start:stop:stride] starts taking values from 'start' till 'stop', moving forward by 'stride' steps
###Code
a = np.zeros(shape = (8,8))
a[1::2, ::2] = 1
a[::2, 1::2] = 1
print (a)
###Output
[[0. 1. 0. 1. 0. 1. 0. 1.]
[1. 0. 1. 0. 1. 0. 1. 0.]
[0. 1. 0. 1. 0. 1. 0. 1.]
[1. 0. 1. 0. 1. 0. 1. 0.]
[0. 1. 0. 1. 0. 1. 0. 1.]
[1. 0. 1. 0. 1. 0. 1. 0.]
[0. 1. 0. 1. 0. 1. 0. 1.]
[1. 0. 1. 0. 1. 0. 1. 0.]]
|
week-06/ch07-oop-shortcuts.ipynb | ###Markdown
OOP shortcuts* Built-in functions that take care of common tasks in one call (so useful)* File I/O and context managers* An alternative to method overloading* Functions as objects *len, reverse, enumerate*..., *all, any*..., *eval, exec, compile* (I don't use these...)*hasattr, getattr, setattr, and delattr*, which allow attributes on anobject to be manipulated by their string names.*zip*, which takes two or more sequences and returns a new sequence of tuples, where each tuple contains a single value from each sequence.And many more! See the interpreter help documentation for each of the functions listed in dir(\__builtins__).
###Code
a_list = [1,2,3,4,5]
print('len() builtin: {}'.format(len(a_list)))
print('__len__ attr: {}'.format(a_list.__len__()))
print()
print('look at reversed')
print('a_list: {}'.format(a_list))
print('reversed(a_list): {}'.format(reversed(a_list)))
print('the reversed list: {}'.format(list(reversed(a_list))))
print()
for item in reversed(a_list):
print(item)
a_string_list = ['one', 'two', 'three', 'four', 'five']
for i,item in enumerate(a_string_list):
print('item {}: {}'.format(i, item))
print()
for i,item in zip(a_list, a_string_list):
print('item {}: {}'.format(i, item))
print('mismatched lengths: takes first n-elements of second item in zip')
for i,item in zip(a_list[1:3], a_string_list):
print('item {}: {}'.format(i, item))
dir(__builtins__)
dir(sum)
###Output
_____no_output_____
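###Markdown
The hasattr, getattr, setattr and delattr builtins listed above are not exercised in the cell above, so here is a small sketch (the class and attribute names are made up purely for illustration): they let you manipulate attributes by their string names.
###Code
class Config:
    pass

cfg = Config()
setattr(cfg, 'timeout', 30)         # same as cfg.timeout = 30
print(hasattr(cfg, 'timeout'))      # True
print(getattr(cfg, 'timeout'))      # 30
print(getattr(cfg, 'retries', 3))   # 3 - the default is returned when the attribute is missing
delattr(cfg, 'timeout')             # same as del cfg.timeout
print(hasattr(cfg, 'timeout'))      # False
###Output
_____no_output_____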
###Markdown
File I/O and context managers* writing* reading* context managers
###Code
contents = "Some file contents\n"
file = open("filename.txt", "w")
file.write(contents)
file.close()
file = open("filename2.txt", "w")
file.writelines([contents]*3)
file.close()
# this overwrites every time... also explicit management of open and close
# binary add wb, rb
dir(file)
###Output
_____no_output_____
###Markdown
context managers* with open(...) as file... calls __enter__ and __exit__ methods on the file object* removes the need for explicit 'startup' and 'cleanup' code
###Code
with open("filename.txt", "w") as file:
file.write(contents)
with open("filename_seq_cm.txt", "w") as file:
file.writelines([contents]*3)
with open("./a-text-file.txt", "r") as file:
for i, line in enumerate(file.readlines()):
print("line {}: {}".format(i+1, line))
# custom string joiner
class StringJoiner(list):
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.result = "".join(self)
import random, string
with StringJoiner() as joiner:
for i in range(15):
joiner.append(random.choice(string.ascii_letters))
print(joiner.result)
# but...
same_thing = ''.join([random.choice(string.ascii_letters) for i in range(15)])
print(same_thing)
###Output
VjMLIieQzDfnaDl
QkxyOnpvtLqjdFA
###Markdown
An alternative to method overloading* Python doesn't permit multiple methods with the same name!!* instead, write the function with expectation of argument type
###Code
def func1(arg1):
print('original func1')
pass
def func1(arg2):
print('the function has been replaced')
pass
func1('some arg')
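# The replacement behaviour above is why classic "overloading" does not exist in Python.
# One way to get per-type behaviour instead (a sketch, not from the original notebook)
# is to dispatch on the type of the first argument with functools.singledispatch:
from functools import singledispatch

@singledispatch
def describe(arg):
    print('default handler: {}'.format(arg))

@describe.register(int)
def _(arg):
    print('got an int: {}'.format(arg))

@describe.register(list)
def _(arg):
    print('got a list with {} items'.format(len(arg)))

describe('plain string')
describe(42)
describe([1, 2, 3])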
# Python functions accept:
#  * positional arguments
#  * keyword arguments (with default values)
def function(pos1, pos2, kwd1='None', kwd2=5):
print('pos1: {}'.format(pos1))
print('pos2: {}'.format(pos2))
print('kwd1: {}'.format(kwd1))
print('kwd2: {}'.format(kwd2))
pass
#function(1)
#function(1,2)
#function(1, 2, kwd1='2')
#function(2,['hi'], kwd1={'some':'key-value'}, kwd2=('a', 'tuple'))
# specify all inputs out of order
function(pos2=2, pos1=['hi'], kwd2={'some':'key-value'}, kwd1=('a', 'tuple'))
# variable argument lists
def extract_list(*somelist):
print(somelist)
for item in somelist:
print(item)
extract_list()
#extract_list(1)
#extract_list(tuple(range(10))) # feel free to fiddle with this one
#extract_list(1,2,3,4)
#extract_list('item1', 'item2', 'item3')
class Options:
default_options = {
'port': 21,
'host': 'localhost',
'username': None,
'password': None,
'debug': False
}
def __init__(self, **kwargs):
self.options = dict(Options.default_options) # set the default options
self.options.update(kwargs) # update based on user input, can also add keys for the options
def __getitem__(self, key):
return self.options[key]
# sample1 = Options(username='dusty', password='drowssap', some_other_option=True)
# print(sample1.options)
sample2 = Options()
print(sample2.options)
sample2['port']
###Output
{'port': 21, 'host': 'localhost', 'username': None, 'password': None, 'debug': False}
###Markdown
Ordering of arguments:* positional* \*list argument* keyword arguments* \** dictionary to hold anything else
###Code
import shutil
import os.path
def augmented_move(target_folder, *filenames, verbose=False, **specific):
'''Move all filenames into the target_folder, allowing specific treatment of certain files.'''
# # print args
print('target_folder: {}'.format(target_folder))
print('filenames: {}'.format(filenames))
print('verbose: {}'.format(verbose))
print('specific: {}'.format(specific))
def print_verbose(message, filename):
'''print the message only if verbose is enabled'''
if verbose:
print(message.format(filename))
## augmented_move block
# to get the example to work...
filenames = list(filenames) # comes in as a tuple
filenames.extend(specific.keys()) # use list.extend
for filename in filenames:
target_path = os.path.join(target_folder, filename)
if filename in specific:
if specific[filename] == 'ignore':
print_verbose("Ignoring {}", filename)
elif specific[filename] == 'copy':
print_verbose("Copying {}", filename)
shutil.copyfile(filename, target_path)
else:
try:
print_verbose("Moving {}...", filename)
shutil.move(filename, target_path)
except:
print('file {} does not exist'.format(filename))
augmented_move("target_folder", "one", verbose=True, two="ignore", three="ignore")
###Output
target_folder: target_folder
filenames: ('one',)
verbose: True
specific: {'two': 'ignore', 'three': 'ignore'}
Moving one...
file one does not exist
Ignoring two
Ignoring three
###Markdown
Unpacking arguments* can provide arguments as sequence, list, or dictionary
###Code
def show_args(arg1, arg2, arg3="THREE"):
print(arg1, arg2, arg3)
some_args = range(2)
more_args = {
"arg1": "ONE",
"arg2": "TWO"
}
show_args(*some_args)
show_args(**more_args)
# a more practical example for us...
from shapely.geometry import box
bounds = [0,0,5,10]
box(*bounds)
# help(box)
###Output
_____no_output_____
###Markdown
Treating functions like objects* if you want, you can set descriptions or any other named attribute on a function * why would you want to do this?* you have access to the \__name__ attributeLet's go through the timer example because I found it odd...
###Code
def function_object():
'''help doc'''
pass
function_object.whatever = 'something'
function_object.whatever, function_object.__name__
# help(function_object)
###Output
_____no_output_____
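###Markdown
Here is a small illustrative sketch (not the book's timer code) of why this can be handy: a registry that stores plain functions, tags them with a custom attribute, and reports them via `__name__`.
###Code
# illustrative sketch: attach attributes to function objects and read __name__
registry = []
def register(func, description):
    func.description = description  # set an arbitrary attribute on the function object
    registry.append(func)
def greet():
    print("hello")
register(greet, "says hello")
for f in registry:
    print(f.__name__, "->", f.description)
###Output
_____no_output_____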
###Markdown
PyTorch DataLoader example - Sampling an image -- won't work if you don't have the data/libraries... so don't worry about it!! I've kept it for the figures.
###Code
from test_unet_helpers import get_points_list, gtDatasetSampler2
gt_image_2 = '../smart_cities/rasters/union_impervious_raster_2_0_0_wgs84.tif'
dg_id = '1030010057062200'
shpfile = '../smart_cities/union/union.shp'
coords = get_points_list(gt_image_2, dg_id, shpfile, num=100)
coords
from torch.utils.data import Dataset
import rasterio
import numpy as np
import torch
class gtDatasetSampler2(Dataset):
"""DG Dataset"""
def __init__(self, gtfile, coord_pair, window_size=64, transform=None):
"""
Args:
gtfile (string): path to the ground-truth raster to sample from
coord_pair (list): (row, col) centre coordinates of the windows to extract
window_size (int, optional): side length of the square window
transform (callable, optional): Optional transform to be applied
"""
self.image_file = gtfile
self.transform = transform
self.coords = coord_pair
self.window_size = window_size
def __getitem__(self, idx):
with rasterio.open(self.image_file, 'r') as src:
temp = src.read()
# get the window
r,c = self.coords[idx]
r_start = int(r - self.window_size/2)
r_end = int(r_start + self.window_size)
c_start = int(c - self.window_size/2)
c_end = int(c_start + self.window_size)
# extract the window
img_arr = temp[0,r_start:r_end, c_start:c_end]
img_arr = np.expand_dims(img_arr, axis=0)
# set no data to 0
img_arr[img_arr == 3] = 0
# convert to tensor
img_arr = torch.from_numpy(img_arr).float()
return img_arr
def __len__(self):
return len(self.coords)
from torchvision.transforms import ToTensor, Normalize, Compose
from torch.utils.data import DataLoader
gt_transform = Compose([
ToTensor()
])
gt_dataset_train = gtDatasetSampler2(gt_image_2, coords, transform=gt_transform)
# gt_dl_train = DataLoader(gt_dataset_train, batch_size=5, shuffle=False)
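# Sketch only: if the raster data were available and the DataLoader above were
# uncommented, batches could be drawn like this (window_size defaults to 64):
# for batch in gt_dl_train:
#     print(batch.shape)  # expected: torch.Size([5, 1, 64, 64])
#     break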
len(gt_dataset_train), len(coords)
from matplotlib import pyplot as plt
plt.imshow(gt_dataset_train[0][0])
plt.show()
gt_dataset_train[1].shape
###Output
_____no_output_____ |
docs/notebooks/method.ipynb | ###Markdown
Edge behavior and interiorsThis notebook illustrates the edge behavior (when a grid point falls on the edge of a polygon) and how polygon interiors are treated. PreparationImport regionmask and check the version:
###Code
import regionmask
regionmask.__version__
###Output
_____no_output_____
###Markdown
Other imports
###Code
import xarray as xr
import numpy as np
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from matplotlib import colors as mplc
from shapely.geometry import Polygon
###Output
_____no_output_____
###Markdown
Define some colors:
###Code
color1 = "#9ecae1"
color2 = "#fc9272"
color3 = "#cab2d6"
cmap1 = mplc.ListedColormap([color1])
cmap2 = mplc.ListedColormap([color2])
cmap3 = mplc.ListedColormap([color3])
###Output
_____no_output_____
###Markdown
MethodsRegionmask offers three "methods"* to rasterize regions1. `rasterize`: fastest but only for equally-spaced grid, uses `rasterio.features.rasterize` internally.2. `shapely`: for irregular grids, uses `shapely.vectorized.contains` internally.3. `pygeos`: a faster alternative for irregular grids. This method is preferred over (2) if the optional dependency pygeos is installed. Uses `pygeos.STRtree` internally.All methods use the `lon` and `lat` coordinates to determine if a grid cell is in a region. `lon` and `lat` are assumed to indicate the *center* of the grid cell. Methods (1) and (2) have the same edge behavior and consider 'holes' in the regions. `regionmask` automatically determines which `method` to use.(2) and (3) subtract a tiny offset from `lon` and `lat` to achieve an edge behaviour consistent with (1). Due to [mapbox/rasterio1844](https://github.com/mapbox/rasterio/issues/1844) this is unfortunately also necessary for (1).\*Note that all "methods" yield the same results. Edge behaviorThe edge behavior determines how points that fall on the outline of a region are treated. It's easiest to see the edge behaviour in an example. ExampleDefine a region and a lon/ lat grid, such that some gridpoints lie exactly on the border:
###Code
outline = np.array([[-80.0, 50.0], [-80.0, 28.0], [-100.0, 28.0], [-100.0, 50.0]])
region = regionmask.Regions([outline])
ds_US = regionmask.core.utils.create_lon_lat_dataarray_from_bounds(
*(-161, -29, 2), *(75, 13, -2)
)
print(ds_US)
###Output
_____no_output_____
###Markdown
Let's create a mask with each of these methods:
###Code
mask_rasterize = region.mask(ds_US, method="rasterize")
mask_shapely = region.mask(ds_US, method="shapely")
mask_pygeos = region.mask(ds_US, method="pygeos")
###Output
_____no_output_____
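###Markdown
As a minimal sketch, the ``method`` keyword can also simply be omitted and regionmask picks a suitable method itself (see the note below):
###Code
mask_auto = region.mask(ds_US)
###Output
_____no_output_____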
###Markdown
.. note:: ``regionmask`` automatically detects which method to use, so there is no need to specify the ``method`` keyword.
###Code
Plot the masked regions:
###Output
_____no_output_____
###Markdown
f, axes = plt.subplots(1, 3, subplot_kw=dict(projection=ccrs.PlateCarree()))opt = dict(add_colorbar=False, ec="0.5", lw=0.5, transform=ccrs.PlateCarree())mask_rasterize.plot(ax=axes[0], cmap=cmap1, **opt)mask_shapely.plot(ax=axes[1], cmap=cmap2, **opt)mask_pygeos.plot(ax=axes[2], cmap=cmap3, **opt)for ax in axes: ax = region.plot_regions(ax=ax, add_label=False) ax.set_extent([-105, -75, 25, 55], ccrs.PlateCarree()) ax.coastlines(lw=0.5) ax.plot( ds_US.LON, ds_US.lat, "*", color="0.5", ms=0.5, transform=ccrs.PlateCarree() )axes[0].set_title("rasterize")axes[1].set_title("shapely")axes[2].set_title("pygeos");
###Code
Points indicate the grid cell centers (`lon` and `lat`), lines the grid cell borders, colored grid cells are selected to be part of the region. The top and right grid cells now belong to the region while the left and bottom grid cells do not. This choice is arbitrary but follows what `rasterio.features.rasterize` does. This avoids spurious columns of unassigned grid points as the following example shows.
### SREX regions
Create a global dataset:
###Output
_____no_output_____
###Markdown
ds_GLOB = regionmask.core.utils.create_lon_lat_dataarray_from_bounds( *(-180, 181, 2), *(90, -91, -2)) srex = regionmask.defined_regions.srexsrex_new = srex.mask(ds_GLOB) f, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()))opt = dict(add_colorbar=False, cmap="viridis_r")srex_new.plot(ax=ax, ec="0.7", lw=0.25, **opt)srex.plot_regions(ax=ax, add_label=False, line_kws=dict(lw=1))ax.set_extent([-135, -50, 24, 51], ccrs.PlateCarree())ax.coastlines(resolution="50m", lw=0.25)ax.plot( ds_GLOB.LON, ds_GLOB.lat, "*", color="0.5", ms=0.5, transform=ccrs.PlateCarree())sel = ((ds_GLOB.LON == -105) | (ds_GLOB.LON == -85)) & (ds_GLOB.LAT > 28)ax.plot( ds_GLOB.LON.values[sel], ds_GLOB.LAT.values[sel], "*", color="r", ms=0.5, transform=ccrs.PlateCarree(),)ax.set_title("edge points are assigned to the left polygon", fontsize=9);
###Code
Not assigning the grid cells falling exactly on the border of a region (red points) would leave vertical stripes of unassigned cells.
### Points at -180ยฐE (0ยฐE) and -90ยฐN
The described edge behaviour leads to a consistent treatment of points on the border. However, gridpoints at -180ยฐE (or 0ยฐE) and -90ยฐN would *never* fall in any region.
###Output
_____no_output_____
###Markdown
.. note:: From version 0.8 this applies only if ``wrap_lon`` is *not* set to ``False``. If wrap_lon is set to False `regionmask` assumes the coordinates are not lat and lon coordinates.
###Code
We exemplify this with a region spanning the whole globe and a coarse longitude/latitude grid:
###Output
_____no_output_____
###Markdown
outline_global = np.array( [[-180.0, 90.0], [-180.0, -90.0], [180.0, -90.0], [180.0, 90.0]])region_global = regionmask.Regions([outline_global])lon = np.arange(-180, 180, 30)lat = np.arange(90, -91, -30)LON, LAT = np.meshgrid(lon, lat)
###Code
Create the masks:
###Output
_____no_output_____
###Markdown
setting `wrap_lon=False` turns this feature offmask_global_nontreat = region_global.mask(lon, lat, wrap_lon=False)mask_global = region_global.mask(lon, lat)
###Code
And illustrate the issue:
###Output
_____no_output_____
###Markdown
f, axes = plt.subplots(1, 2, subplot_kw=dict(projection=ccrs.PlateCarree()))f.subplots_adjust(wspace=0.05)opt = dict(add_colorbar=False, ec="0.3", lw=0.5, transform=ccrs.PlateCarree())ax = axes[0] work around for SciTools/cartopy/issues/1845mask_global_nontreat = mask_global_nontreat.fillna(1)mask_global_nontreat.plot(ax=ax, colors=[color1, "none"], levels=2, **opt) only for the gridlinesmask_global.plot(ax=ax, colors=["none"], levels=1, **opt)ax.set_title("Not treating points at -180ยฐE and -90ยฐN", size=6)ax.set_title("(a)", loc="left", size=6)ax = axes[1]mask_global.plot(ax=ax, cmap=cmap1, **opt)ax.set_title("Treating points at -180ยฐE and -90ยฐN", size=6)ax.set_title("(b)", loc="left", size=6)for ax in axes: ax = region_global.plot( ax=ax, line_kws=dict(lw=2, color="b15928"), add_label=False, ) ax.plot(LON, LAT, "o", color="0.3", ms=1, transform=ccrs.PlateCarree(), zorder=5) ax.outline_patch.set_visible(False)
###Code
In the example the region spans the whole globe and there are gridpoints at -180ยฐE and -90ยฐN. Just applying the approach above leads to gridpoints that are not assigned to any region even though the region is global (as shown in a). Therefore, points at -180ยฐE (or 0ยฐE) and -90ยฐN are treated specially (b):
Points at -180ยฐE (0ยฐE) are mapped to 180ยฐE (360ยฐE). Points at -90ยฐN are slightly shifted northwards (by 1 * 10 ** -10). Then it is tested if the shifted points belong to any region
This means that (i) a point at -180ยฐE is part of the region that is present at 180ยฐE and not the one at -180ยฐE (this is consistent with assigning points to the polygon *left* from it) and (ii) only the points at -90ยฐN get assigned to the region above.
This is illustrated in the figure below:
###Output
_____no_output_____
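###Markdown
The following cell is a rough sketch in plain NumPy (illustrative only, not regionmask's actual internals) of the special treatment described above:
###Code
import numpy as np
lon_1d = np.arange(-180, 180, 30).astype(float)
lat_1d = np.arange(90, -91, -30).astype(float)
lon_treated = np.where(lon_1d == -180.0, 180.0, lon_1d)         # map -180°E to 180°E
lat_treated = np.where(lat_1d == -90.0, -90.0 + 1e-10, lat_1d)  # nudge -90°N slightly northwards
lon_treated[0], lat_treated[-1]
###Output
_____no_output_____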
###Markdown
outline_global1 = np.array([[-180.0, 60.0], [-180.0, -60.0], [0.0, -60.0], [0.0, 60.0]])outline_global2 = np.array([[0.0, 60.0], [0.0, -60.0], [180.0, -60.0], [180.0, 60.0]])region_global_2 = regionmask.Regions([outline_global1, outline_global2])mask_global_2regions = region_global_2.mask(lon, lat) ax = region_global_2.plot( line_kws=dict(color="b15928", zorder=3, lw=1), add_label=False,)ax.plot(LON, LAT, "o", color="0.3", ms=2, transform=ccrs.PlateCarree(), zorder=5) work around for SciTools/cartopy/issues/1845mask_global_2regions = mask_global_2regions.fillna(2)mask_global_2regions.plot(ax=ax, colors=[color1, color2, "none"], levels=3, **opt) only for the gridlinesmask_global.plot(ax=ax, colors=["none"], levels=1, **opt)ax.set_title("Points at -180ยฐE are mapped to 180ยฐE", size=6)ax.outline_patch.set_lw(0.5)ax.outline_patch.set_zorder(1);
###Code
.. note:: This only applies if the border of the region falls *exactly* on the point. One way to avoid the problem is to calculate the `fractional overlap <https://github.com/regionmask/regionmask/issues/38>`_ of each gridpoint with the regions (which is not yet implemented).
###Output
_____no_output_____
###Markdown
Polygon interiors`Polygons` can have interior boundaries ('holes'). regionmask unmasks these regions. ExampleLet's test this on an example and define a `region_with_hole`:
###Code
interior = np.array(
[
[-86.0, 44.0],
[-86.0, 34.0],
[-94.0, 34.0],
[-94.0, 44.0],
[-86.0, 44.0],
]
)
poly = Polygon(outline, holes=[interior])
region_with_hole = regionmask.Regions([poly])
mask_hole_rasterize = region_with_hole.mask(ds_US, method="rasterize")
mask_hole_shapely = region_with_hole.mask(ds_US, method="shapely")
mask_hole_pygeos = region_with_hole.mask(ds_US, method="pygeos")
f, axes = plt.subplots(1, 3, subplot_kw=dict(projection=ccrs.PlateCarree()))
opt = dict(add_colorbar=False, ec="0.5", lw=0.5)
mask_hole_rasterize.plot(ax=axes[0], cmap=cmap1, **opt)
mask_hole_shapely.plot(ax=axes[1], cmap=cmap2, **opt)
mask_hole_pygeos.plot(ax=axes[2], cmap=cmap3, **opt)
for ax in axes:
region_with_hole.plot_regions(ax=ax, add_label=False, line_kws=dict(lw=1))
ax.set_extent([-105, -75, 25, 55], ccrs.PlateCarree())
ax.coastlines(lw=0.25)
ax.plot(
ds_US.LON, ds_US.lat, "o", color="0.5", ms=0.5, transform=ccrs.PlateCarree()
)
axes[0].set_title("rasterize")
axes[1].set_title("shapely")
axes[2].set_title("pygeos");
###Output
_____no_output_____
###Markdown
Note how the edge behavior of the interior is inverse to the edge behavior of the exterior. Caspian SeaThe Caspian Sea is defined as a polygon interior.
###Code
land110 = regionmask.defined_regions.natural_earth.land_110
mask_land110 = land110.mask(ds_GLOB)
f, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()))
mask_land110.plot(ax=ax, cmap=cmap2, add_colorbar=False)
ax.set_extent([15, 75, 25, 50], ccrs.PlateCarree())
ax.coastlines(resolution="50m", lw=0.5)
ax.plot(
ds_GLOB.LON, ds_GLOB.lat, ".", color="0.5", ms=0.5, transform=ccrs.PlateCarree()
)
ax.text(52, 43.5, "Caspian Sea", transform=ccrs.PlateCarree())
ax.set_title("Polygon interiors are unmasked");
###Output
_____no_output_____
###Markdown
Edge behavior and interiorsThis notebook illustrates the edge behavior (when a grid point falls on the edge of a polygon) and how polygon interiors are treated. PreparationImport regionmask and check the version:
###Code
import regionmask
regionmask.__version__
###Output
_____no_output_____
###Markdown
Other imports
###Code
import xarray as xr
import numpy as np
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from matplotlib import colors as mplc
from shapely.geometry import Polygon
###Output
_____no_output_____
###Markdown
Define some colors:
###Code
color1 = "#9ecae1"
color2 = "#fc9272"
color3 = "#cab2d6"
cmap1 = mplc.ListedColormap([color1])
cmap2 = mplc.ListedColormap([color2])
cmap3 = mplc.ListedColormap([color3])
cmap12 = mplc.ListedColormap([color1, color2])
###Output
_____no_output_____
###Markdown
MethodsRegionmask offers three "methods"* to rasterize regions1. `rasterize`: fastest but only for equally-spaced grid, uses `rasterio.features.rasterize` internally.2. `shapely`: for irregular grids, uses `shapely.vectorized.contains` internally.3. `pygeos`: a faster alternative for irregular grids. This method is preferred over (2) if the optional dependency pygeos is installed. Uses `pygeos.STRtree` internally.All methods use the `lon` and `lat` coordinates to determine if a grid cell is in a region. `lon` and `lat` are assumed to indicate the *center* of the grid cell. All methods have the same edge behavior and consider 'holes' in the regions. `regionmask` automatically determines which `method` to use.(2) and (3) subtract a tiny offset from `lon` and `lat` to achieve an edge behaviour consistent with (1). Due to [mapbox/rasterio1844](https://github.com/mapbox/rasterio/issues/1844) this is unfortunately also necessary for (1).\*Note that all "methods" yield the same results. Edge behaviorThe edge behavior determines how points that fall on the outline of a region are treated. It's easiest to see the edge behaviour in an example. ExampleDefine a region and a lon/ lat grid, such that some gridpoints lie exactly on the border:
###Code
outline = np.array([[-80.0, 50.0], [-80.0, 28.0], [-100.0, 28.0], [-100.0, 50.0]])
region = regionmask.Regions([outline])
ds_US = regionmask.core.utils.create_lon_lat_dataarray_from_bounds(
*(-161, -29, 2), *(75, 13, -2)
)
print(ds_US)
###Output
_____no_output_____
###Markdown
Let's create a mask with each of these methods:
###Code
mask_rasterize = region.mask(ds_US, method="rasterize")
mask_shapely = region.mask(ds_US, method="shapely")
mask_pygeos = region.mask(ds_US, method="pygeos")
###Output
_____no_output_____
###Markdown
.. note:: ``regionmask`` automatically detects which method to use, so there is no need to specify the ``method`` keyword.
###Code
Plot the masked regions:
###Output
_____no_output_____
###Markdown
f, axes = plt.subplots(1, 3, subplot_kw=dict(projection=ccrs.PlateCarree()))opt = dict(add_colorbar=False, ec="0.5", lw=0.5, transform=ccrs.PlateCarree())mask_rasterize.plot(ax=axes[0], cmap=cmap1, **opt)mask_shapely.plot(ax=axes[1], cmap=cmap2, **opt)mask_pygeos.plot(ax=axes[2], cmap=cmap3, **opt)for ax in axes: ax = region.plot_regions(ax=ax, add_label=False) ax.set_extent([-105, -75, 25, 55], ccrs.PlateCarree()) ax.coastlines(lw=0.5) ax.plot( ds_US.LON, ds_US.lat, "*", color="0.5", ms=0.5, transform=ccrs.PlateCarree() )axes[0].set_title("rasterize")axes[1].set_title("shapely")axes[2].set_title("pygeos");
###Code
Points indicate the grid cell centers (`lon` and `lat`), lines the grid cell borders, colored grid cells are selected to be part of the region. The top and right grid cells now belong to the region while the left and bottom grid cells do not. This choice is arbitrary but follows what `rasterio.features.rasterize` does. This avoids spurious columns of unassigned grid points as the following example shows.
### SREX regions
Create a global dataset:
###Output
_____no_output_____
###Markdown
ds_GLOB = regionmask.core.utils.create_lon_lat_dataarray_from_bounds( *(-180, 181, 2), *(90, -91, -2)) srex = regionmask.defined_regions.srexsrex_new = srex.mask(ds_GLOB) f, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()))opt = dict(add_colorbar=False, cmap="viridis_r")srex_new.plot(ax=ax, ec="0.7", lw=0.25, **opt)srex.plot_regions(ax=ax, add_label=False, line_kws=dict(lw=1))ax.set_extent([-135, -50, 24, 51], ccrs.PlateCarree())ax.coastlines(resolution="50m", lw=0.25)ax.plot( ds_GLOB.LON, ds_GLOB.lat, "*", color="0.5", ms=0.5, transform=ccrs.PlateCarree())sel = ((ds_GLOB.LON == -105) | (ds_GLOB.LON == -85)) & (ds_GLOB.LAT > 28)ax.plot( ds_GLOB.LON.values[sel], ds_GLOB.LAT.values[sel], "*", color="r", ms=0.5, transform=ccrs.PlateCarree(),)ax.set_title("edge points are assigned to the left polygon", fontsize=9);
###Code
Not assigning the grid cells falling exactly on the border of a region (red points) would leave vertical stripes of unassigned cells.
### Points at -180ยฐE (0ยฐE) and -90ยฐN
The described edge behaviour leads to a consistent treatment of points on the border. However, gridpoints at -180ยฐE (or 0ยฐE) and -90ยฐN would *never* fall in any region.
###Output
_____no_output_____
###Markdown
.. note:: From version 0.8 this applies only if ``wrap_lon`` is *not* set to ``False``. If wrap_lon is set to False `regionmask` assumes the coordinates are not lat and lon coordinates.
###Code
We exemplify this with a region spanning the whole globe and a coarse longitude/latitude grid:
###Output
_____no_output_____
###Markdown
almost 360 to avoid wrap-around for the plotlon_max = 360.0 - 1e-10outline_global = np.array([[0, 90], [0, -90], [lon_max, -90], [lon_max, 90]])region_global = regionmask.Regions([outline_global])lon = np.arange(0, 360, 30)lat = np.arange(90, -91, -30)LON, LAT = np.meshgrid(lon, lat)
###Code
Create the masks:
###Output
_____no_output_____
###Markdown
setting `wrap_lon=False` turns this feature offmask_global_nontreat = region_global.mask(LON, LAT, wrap_lon=False)mask_global = region_global.mask(LON, LAT)
###Code
And illustrate the issue:
###Output
_____no_output_____
###Markdown
proj = ccrs.PlateCarree(central_longitude=180)f, axes = plt.subplots(1, 2, subplot_kw=dict(projection=proj))f.subplots_adjust(wspace=0.05)opt = dict(add_colorbar=False, ec="0.2", lw=0.25, transform=ccrs.PlateCarree())ax = axes[0]mask_global_nontreat.plot(ax=ax, cmap=cmap1, x="lon", y="lat", **opt)ax.set_title("Not treating points at 0ยฐE and -90ยฐN", size=6)ax.set_title("(a)", loc="left", size=6)ax = axes[1]mask_global.plot(ax=ax, cmap=cmap1, x="lon", y="lat", **opt)ax.set_title("Treating points at 0ยฐE and -90ยฐN", size=6)ax.set_title("(b)", loc="left", size=6)for ax in axes: ax = region_global.plot( ax=ax, line_kws=dict(lw=2, color="b15928"), add_label=False, ) ax.plot(LON, LAT, "o", color="0.3", ms=1, transform=ccrs.PlateCarree(), zorder=5) ax.outline_patch.set_visible(False)
###Code
In the example the region spans the whole globe and there are gridpoints at 0ยฐE and -90ยฐN. Just applying the approach above leads to gridpoints that are not assigned to any region even though the region is global (as shown in a). Therefore, points at -180ยฐE (or 0ยฐE) and -90ยฐN are treated specially (b):
Points at -180ยฐE (0ยฐE) are mapped to 180ยฐE (360ยฐE). Points at -90ยฐN are slightly shifted northwards (by 1 * 10 ** -10). Then it is tested if the shifted points belong to any region
This means that (i) a point at -180ยฐE is part of the region that is present at 180ยฐE and not the one at -180ยฐE (this is consistent with assigning points to the polygon *left* from it) and (ii) only the points at -90ยฐN get assigned to the region above.
This is illustrated in the figure below:
###Output
_____no_output_____
###Markdown
outline_global1 = np.array([[-180.0, 60.0], [-180.0, -60.0], [0.0, -60.0], [0.0, 60.0]])outline_global2 = np.array([[0.0, 60.0], [0.0, -60.0], [180.0, -60.0], [180.0, 60.0]])region_global_2 = regionmask.Regions([outline_global1, outline_global2])mask_global_2regions = region_global_2.mask(lon, lat) ax = region_global_2.plot( line_kws=dict(color="b15928", zorder=3, lw=1.5), add_label=False,)ax.plot( LON, LAT, "o", color="0.3", lw=0.25, ms=2, transform=ccrs.PlateCarree(), zorder=5)mask_global_2regions.plot(ax=ax, cmap=cmap12, **opt)ax.set_title("Points at -180ยฐE are mapped to 180ยฐE", size=6)ax.outline_patch.set_lw(0.25)ax.outline_patch.set_zorder(1);
###Code
.. note:: This only applies if the border of the region falls *exactly* on the point. One way to avoid the problem is to calculate the fractional overlap (see :issue:`38`) of each gridpoint with the regions (which is not yet implemented).
###Output
_____no_output_____
###Markdown
Polygon interiors`Polygons` can have interior boundaries ('holes'). regionmask unmasks these regions. ExampleLet's test this on an example and define a `region_with_hole`:
###Code
interior = np.array(
[
[-86.0, 44.0],
[-86.0, 34.0],
[-94.0, 34.0],
[-94.0, 44.0],
[-86.0, 44.0],
]
)
poly = Polygon(outline, holes=[interior])
region_with_hole = regionmask.Regions([poly])
mask_hole_rasterize = region_with_hole.mask(ds_US, method="rasterize")
mask_hole_shapely = region_with_hole.mask(ds_US, method="shapely")
mask_hole_pygeos = region_with_hole.mask(ds_US, method="pygeos")
f, axes = plt.subplots(1, 3, subplot_kw=dict(projection=ccrs.PlateCarree()))
opt = dict(add_colorbar=False, ec="0.5", lw=0.5)
mask_hole_rasterize.plot(ax=axes[0], cmap=cmap1, **opt)
mask_hole_shapely.plot(ax=axes[1], cmap=cmap2, **opt)
mask_hole_pygeos.plot(ax=axes[2], cmap=cmap3, **opt)
for ax in axes:
region_with_hole.plot_regions(ax=ax, add_label=False, line_kws=dict(lw=1))
ax.set_extent([-105, -75, 25, 55], ccrs.PlateCarree())
ax.coastlines(lw=0.25)
ax.plot(
ds_US.LON, ds_US.lat, "o", color="0.5", ms=0.5, transform=ccrs.PlateCarree()
)
axes[0].set_title("rasterize")
axes[1].set_title("shapely")
axes[2].set_title("pygeos");
###Output
_____no_output_____
###Markdown
Note how the edge behavior of the interior is inverse to the edge behavior of the exterior. Caspian SeaThe Caspian Sea is defined as a polygon interior.
###Code
land110 = regionmask.defined_regions.natural_earth_v5_0_0.land_110
mask_land110 = land110.mask(ds_GLOB)
f, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()))
mask_land110.plot(ax=ax, cmap=cmap2, add_colorbar=False)
ax.set_extent([15, 75, 25, 50], ccrs.PlateCarree())
ax.coastlines(resolution="50m", lw=0.5)
ax.plot(
ds_GLOB.LON, ds_GLOB.lat, ".", color="0.5", ms=0.5, transform=ccrs.PlateCarree()
)
ax.text(52, 43.5, "Caspian Sea", transform=ccrs.PlateCarree())
ax.set_title("Polygon interiors are unmasked");
###Output
_____no_output_____
###Markdown
Edge behavior and interiorsThis notebook illustrates the edge behavior and how Polygon interiors are treated.
###Code
.. note:: From version 0.5 ``regionmask`` treats points on the region borders differently and also considers polygon interiors (holes), e.g. the Caspian Sea in the ``natural_earth.land_110`` region.
###Output
_____no_output_____
###Markdown
PreparationImport regionmask and check the version:
###Code
import regionmask
regionmask.__version__
###Output
_____no_output_____
###Markdown
Other imports
###Code
import xarray as xr
import numpy as np
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from matplotlib import colors as mplc
from shapely.geometry import Polygon
###Output
_____no_output_____
###Markdown
Define some colors:
###Code
cmap1 = mplc.ListedColormap(["#9ecae1"])
cmap2 = mplc.ListedColormap(["#fc9272"])
cmap3 = mplc.ListedColormap(["#cab2d6"])
cmap_2col = mplc.ListedColormap(["#9ecae1", "#fc9272"])
###Output
_____no_output_____
###Markdown
MethodsRegionmask offers two methods to rasterize regions1. `rasterize`: fastest but only for equally-spaced grid, uses `rasterio.features.rasterize` internally.2. `shapely`: for irregular grids, uses `shapely.vectorized.contains` internally.All methods use the `lon` and `lat` coordinates to determine if a grid cell is in a region. `lon` and `lat` are assumed to indicate the *center* of the grid cell. Methods (1) and (2) have the same edge behavior and consider 'holes' in the regions. `regionmask` automatically determines which `method` to use.(2) subtracts a tiny offset from `lon` and `lat` to achieve an edge behaviour consistent with (1). Due to [mapbox/rasterio/1844](https://github.com/mapbox/rasterio/issues/1844) this is unfortunately also necessary for (1). Edge behaviorAs of version 0.5 `regionmask` has a new edge behavior - points that fall on the outline of a region are now consistently treated. This was not the case in earlier versions (xref [matplotlib/matplotlib9704](https://github.com/matplotlib/matplotlib/issues/9704)). It's easiest to see the edge behaviour in an example. ExampleDefine a region and a lon/ lat grid, such that some gridpoints lie exactly on the border:
###Code
outline = np.array([[-80.0, 50.0], [-80.0, 28.0], [-100.0, 28.0], [-100.0, 50.0]])
region = regionmask.Regions([outline])
ds_US = regionmask.core.utils.create_lon_lat_dataarray_from_bounds(
*(-161, -29, 2), *(75, 13, -2)
)
print(ds_US)
###Output
_____no_output_____
###Markdown
Let's create a mask with each of these methods:
###Code
mask_rasterize = region.mask(ds_US, method="rasterize")
mask_shapely = region.mask(ds_US, method="shapely")
###Output
_____no_output_____
###Markdown
.. note:: ``regionmask`` automatically detects which method to use, so there is no need to specify the ``method`` keyword.
###Code
Plot the masked regions:
###Output
_____no_output_____
###Markdown
f, axes = plt.subplots(1, 2, subplot_kw=dict(projection=ccrs.PlateCarree()))opt = dict(add_colorbar=False, ec="0.5", lw=0.5, transform=ccrs.PlateCarree())mask_rasterize.plot(ax=axes[0], cmap=cmap1, **opt)mask_shapely.plot(ax=axes[1], cmap=cmap2, **opt)for ax in axes: ax = region.plot_regions(ax=ax, add_label=False) ax.set_extent([-105, -75, 25, 55], ccrs.PlateCarree()) ax.coastlines(lw=0.5) ax.plot( ds_US.LON, ds_US.lat, "*", color="0.5", ms=0.5, transform=ccrs.PlateCarree() )axes[0].set_title("rasterize")axes[1].set_title("shapely")
###Code
Points indicate the grid cell centers (`lon` and `lat`), lines the grid cell borders, colored grid cells are selected to be part of the region. The top and right grid cells now belong to the region while the left and bottom grid cells do not. This choice is arbitrary but mimics what `rasterio.features.rasterize` does. This avoids spurious columns of unassigned grid points as the following example shows.
### SREX regions
Create a global dataset:
###Output
_____no_output_____
###Markdown
ds_GLOB = regionmask.core.utils.create_lon_lat_dataarray_from_bounds( *(-180, 181, 2), *(90, -91, -2)) srex = regionmask.defined_regions.srexsrex_new = srex.mask(ds_GLOB) f, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()))opt = dict(add_colorbar=False, cmap="viridis_r")srex_new.plot(ax=ax, ec="0.7", lw=0.25, **opt)srex.plot_regions(ax=ax, add_label=False, line_kws=dict(lw=1))ax.set_extent([-135, -50, 24, 51], ccrs.PlateCarree())ax.coastlines(resolution="50m", lw=0.25)ax.plot( ds_GLOB.LON, ds_GLOB.lat, "*", color="0.5", ms=0.5, transform=ccrs.PlateCarree())sel = ((ds_GLOB.LON == -105) | (ds_GLOB.LON == -85)) & (ds_GLOB.LAT > 28)ax.plot( ds_GLOB.LON.values[sel], ds_GLOB.LAT.values[sel], "*", color="r", ms=0.5, transform=ccrs.PlateCarree(),)ax.set_title("new (rasterize + shapely)");
###Code
Not assigning the grid cells falling exactly on the border of a region (red points) would leave vertical stripes of unassigned cells.
### Points at -180ยฐE (0ยฐE) and -90ยฐN
The described edge behaviour leads to a consistent treatment of points on the border. However, gridpoints at -180ยฐE (or 0ยฐE) and -90ยฐN would *never* fall in any region.
We exemplify this with a region spanning the whole globe and a coarse longitude/latitude grid:
###Output
_____no_output_____
###Markdown
outline_global = np.array( [[-180.0, 90.0], [-180.0, -90.0], [180.0, -90.0], [180.0, 90.0]])region_global = regionmask.Regions([outline_global])lon = np.arange(-180, 180, 30)lat = np.arange(90, -91, -30)LON, LAT = np.meshgrid(lon, lat)
###Code
Create the masks:
###Output
_____no_output_____
###Markdown
mask_global = region_global.mask(lon, lat) we need to manually create the maskmask_global_nontreat = mask_global.copy()mask_global_nontreat[-1, :] = np.NaNmask_global_nontreat[:, 0] = np.NaN
###Code
And illustrate the issue:
###Output
_____no_output_____
###Markdown
f, axes = plt.subplots(1, 2, subplot_kw=dict(projection=ccrs.PlateCarree()))f.subplots_adjust(wspace=0.05)opt = dict(add_colorbar=False, ec="0.3", lw=0.5, transform=ccrs.PlateCarree())ax = axes[0]mask_global_nontreat.plot(ax=ax, cmap=cmap1, **opt) only for the gridlinesmask_global.plot(ax=ax, colors=["none"], levels=1, **opt)ax.set_title("Not treating points at -180ยฐE and -90ยฐN", size=6)ax.set_title("(a)", loc="left", size=6)ax = axes[1]mask_global.plot(ax=ax, cmap=cmap1, **opt)ax.set_title("Treating points at -180ยฐE and -90ยฐN", size=6)ax.set_title("(b)", loc="left", size=6)for ax in axes: ax = region_global.plot( ax=ax, line_kws=dict(lw=2, color="b15928"), add_label=False, ) ax.plot(LON, LAT, "o", color="0.3", ms=1, transform=ccrs.PlateCarree(), zorder=5) ax.outline_patch.set_visible(False)
###Code
In the example the region spans the whole globe and there are gridpoints at -180ยฐE and -90ยฐN. Just applying the approach above leads to gridpoints that are not assigned to any region even though the region is global (as shown in a). Therefore, points at -180ยฐE (or 0ยฐE) and -90ยฐN are treated specially (b):
Points at -180ยฐE (0ยฐE) are mapped to 180ยฐE (360ยฐE). Points at -90ยฐN are slightly shifted northwards (by 1 * 10 ** -10). Then it is tested if the shifted points belong to any region
This means that (i) a point at -180ยฐE is part of the region that is present at 180ยฐE (and not the one at -180ยฐE) and (ii) only the points at -90ยฐN get assigned to the region above.
###Output
_____no_output_____
###Markdown
outline_global1 = np.array([[-180.0, 60.0], [-180.0, -60.0], [0.0, -60.0], [0.0, 60.0]])outline_global2 = np.array([[0.0, 60.0], [0.0, -60.0], [180.0, -60.0], [180.0, 60.0]])region_global_2 = regionmask.Regions([outline_global1, outline_global2])mask_global_2regions = region_global_2.mask(lon, lat) ax = region_global_2.plot(line_kws=dict(color="b15928", zorder=3), add_label=False,)ax.plot(LON, LAT, "o", color="0.3", ms=2, transform=ccrs.PlateCarree(), zorder=5)mask_global_2regions.plot(ax=ax, cmap=cmap_2col, **opt) only for the gridlinesmask_global.plot(ax=ax, colors=["none"], levels=1, **opt)ax.set_title("Points at -180ยฐE are mapped to 180ยฐE", size=6)ax.outline_patch.set_lw(0.5)ax.outline_patch.set_zorder(1);
###Code
.. note:: This only applies if the border of the region falls exactly on the point. One way to avoid the problem is to calculate the `fractional overlap <https://github.com/regionmask/regionmask/issues/38>`_ of each gridpoint with the regions (which is not yet implemented).
###Output
_____no_output_____
###Markdown
Polygon interiors`Polygons` can have interior boundaries ('holes'). Prior to version 0.5.0 these were not considered and e.g. the Caspian Sea was not 'unmasked'. ExampleLet's test this on an example and define a `region_with_hole`:
###Code
interior = np.array(
[[-86.0, 44.0], [-86.0, 34.0], [-94.0, 34.0], [-94.0, 44.0], [-86.0, 44.0],]
)
poly = Polygon(outline, [interior])
region_with_hole = regionmask.Regions([poly])
mask_hole_rasterize = region_with_hole.mask(ds_US, method="rasterize")
mask_hole_shapely = region_with_hole.mask(ds_US, method="shapely")
f, axes = plt.subplots(1, 2, subplot_kw=dict(projection=ccrs.PlateCarree()))
opt = dict(add_colorbar=False, ec="0.5", lw=0.5)
mask_hole_rasterize.plot(ax=axes[0], cmap=cmap1, **opt)
mask_hole_shapely.plot(ax=axes[1], cmap=cmap2, **opt)
for ax in axes:
region.plot_regions(ax=ax, add_label=False)
ax.set_extent([-105, -75, 25, 55], ccrs.PlateCarree())
ax.coastlines(lw=0.5)
ax.plot(
ds_US.LON, ds_US.lat, "o", color="0.5", ms=0.5, transform=ccrs.PlateCarree()
)
axes[0].set_title("rasterize")
axes[1].set_title("shapely");
###Output
_____no_output_____
###Markdown
Caspian Sea
###Code
land110 = regionmask.defined_regions.natural_earth.land_110
land_new = land110.mask(ds_GLOB)
f, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()))
opt = dict(add_colorbar=False)
land_new.plot(ax=ax, cmap=cmap2, **opt)
ax.set_extent([15, 75, 25, 50], ccrs.PlateCarree())
ax.coastlines(resolution="50m", lw=0.5)
ax.plot(
ds_GLOB.LON, ds_GLOB.lat, ".", color="0.5", ms=0.5, transform=ccrs.PlateCarree()
)
ax.text(52, 43.5, "Caspian Sea", transform=ccrs.PlateCarree())
ax.set_title("Polygon interiors are unmasked");
###Output
_____no_output_____
###Markdown
Edge behavior and interiorsThis notebook illustrates the edge behavior and how Polygon interiors are treated.
###Code
.. note:: From version 0.5 ``regionmask`` treats points on the region borders differently and also considers polygon interiors (holes), e.g. the Caspian Sea in the ``natural_earth.land_110`` region.
###Output
_____no_output_____
###Markdown
PreparationImport regionmask and check the version:
###Code
import regionmask
regionmask.__version__
###Output
_____no_output_____
###Markdown
Other imports
###Code
import xarray as xr
import numpy as np
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from matplotlib import colors as mplc
from shapely.geometry import Polygon
###Output
_____no_output_____
###Markdown
Define some colors:
###Code
cmap1 = mplc.ListedColormap(["#9ecae1"])
cmap2 = mplc.ListedColormap(["#fc9272"])
cmap3 = mplc.ListedColormap(["#cab2d6"])
cmap_2col = mplc.ListedColormap(["#9ecae1", "#fc9272"])
###Output
_____no_output_____
###Markdown
MethodsRegionmask offers two methods to rasterize regions1. `rasterize`: fastest but only for equally-spaced grid, uses `rasterio.features.rasterize` internally.2. `shapely`: for irregular grids, uses `shapely.vectorized.contains` internally.All methods use the `lon` and `lat` coordinates to determine if a grid cell is in a region. `lon` and `lat` are assumed to indicate the *center* of the grid cell. Methods (1) and (2) have the same edge behavior and consider 'holes' in the regions. `regionmask` automatically determines which `method` to use.(2) subtracts a tiny offset from `lon` and `lat` to achieve an edge behaviour consistent with (1). Due to [mapbox/rasterio/1844](https://github.com/mapbox/rasterio/issues/1844) this is unfortunately also necessary for (1). Edge behaviorAs of version 0.5 `regionmask` has a new edge behavior - points that fall on the outline of a region are now consistently treated. This was not the case in earlier versions (xref [matplotlib/matplotlib9704](https://github.com/matplotlib/matplotlib/issues/9704)). It's easiest to see the edge behaviour in an example. ExampleDefine a region and a lon/ lat grid, such that some gridpoints lie exactly on the border:
###Code
outline = np.array([[-80.0, 50.0], [-80.0, 28.0], [-100.0, 28.0], [-100.0, 50.0]])
region = regionmask.Regions([outline])
ds_US = regionmask.core.utils.create_lon_lat_dataarray_from_bounds(
*(-161, -29, 2), *(75, 13, -2)
)
print(ds_US)
###Output
_____no_output_____
###Markdown
Let's create a mask with each of these methods:
###Code
mask_rasterize = region.mask(ds_US, method="rasterize")
mask_shapely = region.mask(ds_US, method="shapely")
###Output
_____no_output_____
###Markdown
.. note:: ``regionmask`` automatically detects which method to use, so there is no need to specify the ``method`` keyword.
###Code
Plot the masked regions:
###Output
_____no_output_____
###Markdown
f, axes = plt.subplots(1, 2, subplot_kw=dict(projection=ccrs.PlateCarree()))opt = dict(add_colorbar=False, ec="0.5", lw=0.5, transform=ccrs.PlateCarree())mask_rasterize.plot(ax=axes[0], cmap=cmap1, **opt)mask_shapely.plot(ax=axes[1], cmap=cmap2, **opt)for ax in axes: ax = region.plot_regions(ax=ax, add_label=False) ax.set_extent([-105, -75, 25, 55], ccrs.PlateCarree()) ax.coastlines(lw=0.5) ax.plot( ds_US.LON, ds_US.lat, "*", color="0.5", ms=0.5, transform=ccrs.PlateCarree() )axes[0].set_title("rasterize")axes[1].set_title("shapely")
###Code
Points indicate the grid cell centers (`lon` and `lat`), lines the grid cell borders, colored grid cells are selected to be part of the region. The top and right grid cells now belong to the region while the left and bottom grid cells do not. This choice is arbitrary but mimics what `rasterio.features.rasterize` does. This avoids spurious columns of unassigned grid points as the following example shows.
### SREX regions
Create a global dataset:
###Output
_____no_output_____
###Markdown
ds_GLOB = regionmask.core.utils.create_lon_lat_dataarray_from_bounds( *(-180, 181, 2), *(90, -91, -2)) srex = regionmask.defined_regions.srexsrex_new = srex.mask(ds_GLOB) f, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()))opt = dict(add_colorbar=False, cmap="viridis_r")srex_new.plot(ax=ax, ec="0.7", lw=0.25, **opt)srex.plot_regions(ax=ax, add_label=False, line_kws=dict(lw=1))ax.set_extent([-135, -50, 24, 51], ccrs.PlateCarree())ax.coastlines(resolution="50m", lw=0.25)ax.plot( ds_GLOB.LON, ds_GLOB.lat, "*", color="0.5", ms=0.5, transform=ccrs.PlateCarree())sel = ((ds_GLOB.LON == -105) | (ds_GLOB.LON == -85)) & (ds_GLOB.LAT > 28)ax.plot( ds_GLOB.LON.values[sel], ds_GLOB.LAT.values[sel], "*", color="r", ms=0.5, transform=ccrs.PlateCarree(),)ax.set_title("new (rasterize + shapely)");
###Code
Not assigning the grid cells falling exactly on the border of a region (red points) would leave vertical stripes of unassigned cells.
### Points at -180ยฐE (0ยฐE) and -90ยฐN
The described edge behaviour leads to a consistent treatment of points on the border. However, gridpoints at -180ยฐE (or 0ยฐE) and -90ยฐN would *never* fall in any region.
We exemplify this with a region spanning the whole globe and a coarse longitude/latitude grid:
###Output
_____no_output_____
###Markdown
outline_global = np.array( [[-180.0, 90.0], [-180.0, -90.0], [180.0, -90.0], [180.0, 90.0]])region_global = regionmask.Regions([outline_global])lon = np.arange(-180, 180, 30)lat = np.arange(90, -91, -30)LON, LAT = np.meshgrid(lon, lat)
###Code
Create the masks:
###Output
_____no_output_____
###Markdown
mask_global = region_global.mask(lon, lat) we need to manually create the maskmask_global_nontreat = mask_global.copy()mask_global_nontreat[-1, :] = np.NaNmask_global_nontreat[:, 0] = np.NaN
###Code
And illustrate the issue:
###Output
_____no_output_____
###Markdown
f, axes = plt.subplots(1, 2, subplot_kw=dict(projection=ccrs.PlateCarree()))f.subplots_adjust(wspace=0.05)opt = dict(add_colorbar=False, ec="0.3", lw=0.5, transform=ccrs.PlateCarree())ax = axes[0]mask_global_nontreat.plot(ax=ax, cmap=cmap1, **opt) only for the gridlinesmask_global.plot(ax=ax, colors=["none"], levels=1, **opt)ax.set_title("Not treating points at -180ยฐE and -90ยฐN", size=6)ax.set_title("(a)", loc="left", size=6)ax = axes[1]mask_global.plot(ax=ax, cmap=cmap1, **opt)ax.set_title("Treating points at -180ยฐE and -90ยฐN", size=6)ax.set_title("(b)", loc="left", size=6)for ax in axes: ax = region_global.plot( ax=ax, line_kws=dict(lw=2, color="b15928"), add_label=False, ) ax.plot(LON, LAT, "o", color="0.3", ms=1, transform=ccrs.PlateCarree(), zorder=5) ax.outline_patch.set_visible(False)
###Code
In the example the region spans the whole globe and there are gridpoints at -180ยฐE and -90ยฐN. Just applying the approach above leads to gridpoints that are not assigned to any region even though the region is global (as shown in a). Therefore, points at -180ยฐE (or 0ยฐE) and -90ยฐN are treated specially (b):
Points at -180ยฐE (0ยฐE) are mapped to 180ยฐE (360ยฐE). Points at -90ยฐN are slightly shifted northwards (by 1 * 10 ** -10). Then it is tested if the shifted points belong to any region
This means that (i) a point at -180ยฐE is part of the region that is present at 180ยฐE (and not the one at -180ยฐE) and (ii) only the points at -90ยฐN get assigned to the region above.
###Output
_____no_output_____
###Markdown
outline_global1 = np.array([[-180.0, 60.0], [-180.0, -60.0], [0.0, -60.0], [0.0, 60.0]])outline_global2 = np.array([[0.0, 60.0], [0.0, -60.0], [180.0, -60.0], [180.0, 60.0]])region_global_2 = regionmask.Regions([outline_global1, outline_global2])mask_global_2regions = region_global_2.mask(lon, lat) ax = region_global_2.plot(line_kws=dict(color="b15928", zorder=3), add_label=False,)ax.plot(LON, LAT, "o", color="0.3", ms=2, transform=ccrs.PlateCarree(), zorder=5)mask_global_2regions.plot(ax=ax, cmap=cmap_2col, **opt) only for the gridlinesmask_global.plot(ax=ax, colors=["none"], levels=1, **opt)ax.set_title("Points at -180ยฐE are mapped to 180ยฐE", size=6)ax.outline_patch.set_lw(0.5)ax.outline_patch.set_zorder(1);
###Code
.. note:: This only applies if the border of the region falls exactly on the point. One way to avoid the problem is to calculate the `fractional overlap <https://github.com/mathause/regionmask/issues/38>`_ of each gridpoint with the regions (which is not yet implemented).
###Output
_____no_output_____
###Markdown
Polygon interiors`Polygons` can have interior boundaries ('holes'). Prior to version 0.5.0 these were not considered and e.g. the Caspian Sea was not 'unmasked'. ExampleLet's test this on an example and define a `region_with_hole`:
###Code
interior = np.array(
[[-86.0, 44.0], [-86.0, 34.0], [-94.0, 34.0], [-94.0, 44.0], [-86.0, 44.0],]
)
poly = Polygon(outline, [interior])
region_with_hole = regionmask.Regions([poly])
mask_hole_rasterize = region_with_hole.mask(ds_US, method="rasterize")
mask_hole_shapely = region_with_hole.mask(ds_US, method="shapely")
f, axes = plt.subplots(1, 2, subplot_kw=dict(projection=ccrs.PlateCarree()))
opt = dict(add_colorbar=False, ec="0.5", lw=0.5)
mask_hole_rasterize.plot(ax=axes[0], cmap=cmap1, **opt)
mask_hole_shapely.plot(ax=axes[1], cmap=cmap2, **opt)
for ax in axes:
region.plot_regions(ax=ax, add_label=False)
ax.set_extent([-105, -75, 25, 55], ccrs.PlateCarree())
ax.coastlines(lw=0.5)
ax.plot(
ds_US.LON, ds_US.lat, "o", color="0.5", ms=0.5, transform=ccrs.PlateCarree()
)
axes[0].set_title("rasterize")
axes[1].set_title("shapely");
###Output
_____no_output_____
###Markdown
Caspian Sea
###Code
land110 = regionmask.defined_regions.natural_earth.land_110
land_new = land110.mask(ds_GLOB)
f, ax = plt.subplots(1, 1, subplot_kw=dict(projection=ccrs.PlateCarree()))
opt = dict(add_colorbar=False)
land_new.plot(ax=ax, cmap=cmap2, **opt)
ax.set_extent([15, 75, 25, 50], ccrs.PlateCarree())
ax.coastlines(resolution="50m", lw=0.5)
ax.plot(
ds_GLOB.LON, ds_GLOB.lat, ".", color="0.5", ms=0.5, transform=ccrs.PlateCarree()
)
ax.text(52, 43.5, "Caspian Sea", transform=ccrs.PlateCarree())
ax.set_title("Polygon interiors are unmasked");
###Output
_____no_output_____ |
object_tracking_from_video.ipynb | ###Markdown
object_tracking_from_videoIn this notebook we apply object tracking using SORT to determine the motion of people in a video, and count the number of people crossing a line. The general idea and the video are taken from https://www.pyimagesearch.com/2018/08/13/opencv-people-counter/ In the video there are 6 people in total - 4 walk north-south and 2 south-north
###Code
#!pip install -r requirements.txt
from pathlib import Path
import pandas as pd
import cv2
from IPython.display import Image
import deepstack.core as ds
import json
import numpy as np
from sort.sort import *
%matplotlib inline
videos = list(Path("video/").rglob("*.mp4"))
video_path = str(videos[0]) # use video 0
print(video_path)
video_path
###Output
_____no_output_____
###Markdown
Read the frames and write to a temp folder (contents are gitignored to keep repo small)
###Code
FRAME_SAMPLING = 5 # video is at 30 FPS so down sample to limit compute
vidcap = cv2.VideoCapture(video_path)
success, frame = vidcap.read()
count = 0
cv2.imwrite("tmp/frame%d.jpg" % count, frame) # save frame as JPEG file
frames = list(Path("tmp/").rglob("*.jpg"))
frame_path = str(frames[0])
print(frame_path)
Image(frame_path)
###Output
_____no_output_____
###Markdown
Now iterate over all frames, saving those whose index is a multiple of FRAME_SAMPLING. Use a list of dictionaries to keep info about the frames
###Code
results = []
true_count = 0 # in the original video
frame_count = 0 # in the sampled images
vidcap = cv2.VideoCapture(video_path)
success, frame = vidcap.read()
while success:
success, frame = vidcap.read()
frame_path = f"tmp/frame_{frame_count}.jpg"
if true_count % FRAME_SAMPLING == 0:
cv2.imwrite(frame_path, frame) # save frame as JPEG file
frame_info = {}
frame_info['true_count'] = true_count
frame_info['frame_count'] = frame_count
frame_info['frame_path'] = frame_path
results.append(frame_info)
frame_count += 1
true_count += 1
print(results[0])
print(len(results))
###Output
{'true_count': 0, 'frame_count': 0, 'frame_path': 'tmp/frame_0.jpg'}
107
###Markdown
Process frames to extract object bounding boxes
###Code
# deepstack credentials
IP_ADDRESS = 'localhost'
PORT = 80
API_KEY = "" # if you have not set an api_key, just pass an empty string
dsobject = ds.DeepstackObject(IP_ADDRESS, PORT, API_KEY)
%%time
for i, frame_info in enumerate(results):
image = str(frame_info['frame_path'])
try:
with open(image, 'rb') as image_bytes:
predictions = dsobject.detect(image_bytes)
frame_info['predictions'] = predictions
frame_info['persons'] = len([p for p in predictions if p['label']=='person'])
print(f"Processing image number {i} : {image} : {frame_info['persons']} persons")
except Exception as exc:
print(exc)
###Output
Processing image number 0 : tmp/frame_0.jpg : 0 persons
Processing image number 1 : tmp/frame_1.jpg : 0 persons
Processing image number 2 : tmp/frame_2.jpg : 0 persons
Processing image number 3 : tmp/frame_3.jpg : 0 persons
Processing image number 4 : tmp/frame_4.jpg : 0 persons
Processing image number 5 : tmp/frame_5.jpg : 0 persons
Processing image number 6 : tmp/frame_6.jpg : 0 persons
Processing image number 7 : tmp/frame_7.jpg : 0 persons
Processing image number 8 : tmp/frame_8.jpg : 0 persons
Processing image number 9 : tmp/frame_9.jpg : 1 persons
Processing image number 10 : tmp/frame_10.jpg : 1 persons
Processing image number 11 : tmp/frame_11.jpg : 0 persons
Processing image number 12 : tmp/frame_12.jpg : 0 persons
Processing image number 13 : tmp/frame_13.jpg : 0 persons
Processing image number 14 : tmp/frame_14.jpg : 0 persons
Processing image number 15 : tmp/frame_15.jpg : 0 persons
Processing image number 16 : tmp/frame_16.jpg : 0 persons
Processing image number 17 : tmp/frame_17.jpg : 0 persons
Processing image number 18 : tmp/frame_18.jpg : 0 persons
Processing image number 19 : tmp/frame_19.jpg : 0 persons
Processing image number 20 : tmp/frame_20.jpg : 3 persons
Processing image number 21 : tmp/frame_21.jpg : 1 persons
Processing image number 22 : tmp/frame_22.jpg : 0 persons
Processing image number 23 : tmp/frame_23.jpg : 1 persons
Processing image number 24 : tmp/frame_24.jpg : 2 persons
Processing image number 25 : tmp/frame_25.jpg : 1 persons
Processing image number 26 : tmp/frame_26.jpg : 2 persons
Processing image number 27 : tmp/frame_27.jpg : 1 persons
Processing image number 28 : tmp/frame_28.jpg : 1 persons
Processing image number 29 : tmp/frame_29.jpg : 0 persons
Processing image number 30 : tmp/frame_30.jpg : 1 persons
Processing image number 31 : tmp/frame_31.jpg : 1 persons
Processing image number 32 : tmp/frame_32.jpg : 2 persons
Processing image number 33 : tmp/frame_33.jpg : 1 persons
Processing image number 34 : tmp/frame_34.jpg : 1 persons
Processing image number 35 : tmp/frame_35.jpg : 1 persons
Processing image number 36 : tmp/frame_36.jpg : 0 persons
Processing image number 37 : tmp/frame_37.jpg : 0 persons
Processing image number 38 : tmp/frame_38.jpg : 0 persons
Processing image number 39 : tmp/frame_39.jpg : 0 persons
Processing image number 40 : tmp/frame_40.jpg : 0 persons
Processing image number 41 : tmp/frame_41.jpg : 0 persons
Processing image number 42 : tmp/frame_42.jpg : 0 persons
Processing image number 43 : tmp/frame_43.jpg : 0 persons
Processing image number 44 : tmp/frame_44.jpg : 0 persons
Processing image number 45 : tmp/frame_45.jpg : 0 persons
Processing image number 46 : tmp/frame_46.jpg : 0 persons
Processing image number 47 : tmp/frame_47.jpg : 0 persons
Processing image number 48 : tmp/frame_48.jpg : 0 persons
Processing image number 49 : tmp/frame_49.jpg : 0 persons
Processing image number 50 : tmp/frame_50.jpg : 0 persons
Processing image number 51 : tmp/frame_51.jpg : 0 persons
Processing image number 52 : tmp/frame_52.jpg : 0 persons
Processing image number 53 : tmp/frame_53.jpg : 0 persons
Processing image number 54 : tmp/frame_54.jpg : 0 persons
Processing image number 55 : tmp/frame_55.jpg : 0 persons
Processing image number 56 : tmp/frame_56.jpg : 0 persons
Processing image number 57 : tmp/frame_57.jpg : 0 persons
Processing image number 58 : tmp/frame_58.jpg : 0 persons
Processing image number 59 : tmp/frame_59.jpg : 0 persons
Processing image number 60 : tmp/frame_60.jpg : 0 persons
Processing image number 61 : tmp/frame_61.jpg : 0 persons
Processing image number 62 : tmp/frame_62.jpg : 0 persons
Processing image number 63 : tmp/frame_63.jpg : 0 persons
Processing image number 64 : tmp/frame_64.jpg : 0 persons
Processing image number 65 : tmp/frame_65.jpg : 0 persons
Processing image number 66 : tmp/frame_66.jpg : 1 persons
Processing image number 67 : tmp/frame_67.jpg : 2 persons
Processing image number 68 : tmp/frame_68.jpg : 2 persons
Processing image number 69 : tmp/frame_69.jpg : 2 persons
Processing image number 70 : tmp/frame_70.jpg : 2 persons
Processing image number 71 : tmp/frame_71.jpg : 0 persons
Processing image number 72 : tmp/frame_72.jpg : 1 persons
Processing image number 73 : tmp/frame_73.jpg : 0 persons
Processing image number 74 : tmp/frame_74.jpg : 0 persons
Processing image number 75 : tmp/frame_75.jpg : 0 persons
Processing image number 76 : tmp/frame_76.jpg : 0 persons
Processing image number 77 : tmp/frame_77.jpg : 0 persons
Processing image number 78 : tmp/frame_78.jpg : 0 persons
Processing image number 79 : tmp/frame_79.jpg : 2 persons
Processing image number 80 : tmp/frame_80.jpg : 0 persons
Processing image number 81 : tmp/frame_81.jpg : 0 persons
Processing image number 82 : tmp/frame_82.jpg : 0 persons
Processing image number 83 : tmp/frame_83.jpg : 0 persons
Processing image number 84 : tmp/frame_84.jpg : 0 persons
Processing image number 85 : tmp/frame_85.jpg : 0 persons
Processing image number 86 : tmp/frame_86.jpg : 1 persons
Processing image number 87 : tmp/frame_87.jpg : 0 persons
Processing image number 88 : tmp/frame_88.jpg : 0 persons
Processing image number 89 : tmp/frame_89.jpg : 0 persons
Processing image number 90 : tmp/frame_90.jpg : 0 persons
Processing image number 91 : tmp/frame_91.jpg : 0 persons
Processing image number 92 : tmp/frame_92.jpg : 1 persons
Processing image number 93 : tmp/frame_93.jpg : 0 persons
Processing image number 94 : tmp/frame_94.jpg : 0 persons
Processing image number 95 : tmp/frame_95.jpg : 0 persons
Processing image number 96 : tmp/frame_96.jpg : 0 persons
Processing image number 97 : tmp/frame_97.jpg : 0 persons
Processing image number 98 : tmp/frame_98.jpg : 1 persons
Processing image number 99 : tmp/frame_99.jpg : 0 persons
Processing image number 100 : tmp/frame_100.jpg : 0 persons
Processing image number 101 : tmp/frame_101.jpg : 0 persons
Processing image number 102 : tmp/frame_102.jpg : 0 persons
Processing image number 103 : tmp/frame_103.jpg : 0 persons
Processing image number 104 : tmp/frame_104.jpg : 0 persons
Processing image number 105 : tmp/frame_105.jpg : 0 persons
Processing image number 106 : tmp/frame_106.jpg : 0 persons
CPU times: user 478 ms, sys: 126 ms, total: 604 ms
Wall time: 47.5 s
###Markdown
Write results to json for safekeeping
###Code
with open('results.json', 'w') as fp:
json.dump(results, fp)
###Output
_____no_output_____
###Markdown
Extract the person count and visualise
###Code
persons = {p['frame_count']:p['persons'] for p in results}
pd.Series(persons).plot.bar(figsize=(15,5)).set_ylabel('Person count')
###Output
_____no_output_____
###Markdown
This looks approximately correct but appears to be poor at identifying the individuals. We could use more frames to improve our chance of detection, as maybe the sampled frames are not the best. Track with sort* https://towardsdatascience.com/detect-and-track-baseball-using-detectron2-and-sort-6dd92a46e6f2* https://github.com/abewley/sort Note: I have just placed the contents of the sort repo in the folder `sort`
###Code
# !pip install -r sort/requirements.txt
###Output
_____no_output_____
###Markdown
Now we iterate over the frames, and for each frame pass in the bounding box coordinates, and get back the tracked object IDs. * Sort expects a numpy array of detections in the format of `[[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],…]`* Use `np.empty((0, 5))` for frames without detections. Write a helper to extract this info from results[10]
###Code
test_prediction = [{'confidence': 0.97203445,
'label': 'person',
'y_min': 0,
'x_min': 104,
'y_max': 108,
'x_max': 162},
{'confidence': 0.8538234,
'label': 'person',
'y_min': 2,
'x_min': 185,
'y_max': 84,
'x_max': 235}]
test_prediction
def get_detections(prediction : list) -> np.array:
detections = []
people = [p for p in prediction if p['label']=='person']
if len(people) == 0:
return np.empty((0, 5))
for p in people:
detection = np.array([p['x_min'], p['y_min'], p['x_max'], p['y_max'], p['confidence']])
detections.append(detection)
return np.array(detections)
get_detections(test_prediction)
#create instance of SORT. Note that rerunning this cell increments the track id each time
mot_tracker = Sort()
for i, frame_info in enumerate(results):
image = str(frame_info['frame_path'])
detections = get_detections(frame_info['predictions'])
track_bbs_ids = mot_tracker.update(detections)
print(i, track_bbs_ids)
###Output
0 []
1 []
2 []
3 []
4 []
5 []
6 []
7 []
8 []
9 []
10 []
11 []
12 []
13 []
14 []
15 []
16 []
17 []
18 []
19 []
20 []
21 []
22 []
23 []
24 []
25 [[183.96008252 44.48918636 253.9329312 165.70161142 2. ]]
26 [[187.51927306 63.42292068 260.85094848 187.80326276 2. ]]
27 [[187.91299682 86.18878162 264.61184814 215.06813559 2. ]]
28 [[184.38561766 105.32894448 261.83471226 226.89267335 2. ]]
29 []
30 []
31 []
32 []
33 []
34 [[194.44007995 232.20141163 266.79182515 305.72407341 7. ]]
35 [[195.91589067 250.91134354 262.48308501 307.04828682 7. ]]
36 []
37 []
38 []
39 []
40 []
41 []
42 []
43 []
44 []
45 []
46 []
47 []
48 []
49 []
50 []
51 []
52 []
53 []
54 []
55 []
56 []
57 []
58 []
59 []
60 []
61 []
62 []
63 []
64 []
65 []
66 []
67 []
68 []
69 [[222.88807956 194.25703272 310.70742558 287.16672684 8. ]]
70 [[303.4908146 189.9329321 373.6862038 269.61225669 9. ]
[220.52678672 170.09929987 310.26744733 275.23143929 8. ]]
71 []
72 []
73 []
74 []
75 []
76 []
77 []
78 []
79 []
80 []
81 []
82 []
83 []
84 []
85 []
86 []
87 []
88 []
89 []
90 []
91 []
92 []
93 []
94 []
95 []
96 []
97 []
98 []
99 []
100 []
101 []
102 []
103 []
104 []
105 []
106 []
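###Markdown
The original goal was to count people crossing a line. One rough way to do this from the SORT output (a sketch only - `LINE_Y` is an assumed horizontal counting line in pixels and would need tuning for this video) is to keep each track ID's last centroid and check when it moves across the line between frames:
###Code
LINE_Y = 200  # hypothetical y position of the counting line
last_y = {}   # track id -> last centroid y
count_down, count_up = 0, 0
mot_tracker = Sort()  # fresh tracker (rerunning increments the track ids, as noted above)
for frame_info in results:
    detections = get_detections(frame_info['predictions'])
    for x1, y1, x2, y2, track_id in mot_tracker.update(detections):
        cy = (y1 + y2) / 2  # centroid y of this track
        if track_id in last_y:
            if last_y[track_id] < LINE_Y <= cy:
                count_down += 1  # crossed the line moving down the frame
            elif last_y[track_id] >= LINE_Y > cy:
                count_up += 1    # crossed the line moving up the frame
        last_y[track_id] = cy
print(f"down: {count_down}, up: {count_up}")
###Output
_____no_output_____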
|
L10_LDA x PCA/L10_linear_discriminant_analysis.ipynb | ###Markdown
Linear Discriminant Analysis from scratch Introduction Linear Discriminant Analysis (LDA) is most commonly used as a dimensionality reduction technique in the pre-processing step for pattern-classification and machine learning applications. The goal is to project a dataset onto a lower-dimensional space with good class-separability in order to avoid overfitting ("curse of dimensionality") and also reduce computational costs.Ronald A. Fisher formulated the *Linear Discriminant* in 1936 ([The Use of Multiple Measurements in Taxonomic Problems](http://onlinelibrary.wiley.com/doi/10.1111/j.1469-1809.1936.tb02137.x/abstract)), and it also has some practical uses as a classifier. The original Linear discriminant was described for a 2-class problem, and it was later generalized as "multi-class Linear Discriminant Analysis" or "Multiple Discriminant Analysis" by C. R. Rao in 1948 ([The utilization of multiple measurements in problems of biological classification](http://www.jstor.org/stable/2983775)). **The general LDA approach is very similar to a Principal Component Analysis, but in addition to finding the component axes that maximize the variance of our data (PCA), we are additionally interested in the axes that maximize the separation between multiple classes (LDA).** So, in a nutshell, often the goal of an LDA is to project a feature space (a dataset of n-dimensional samples) onto a smaller subspace $k$ (where $k \leq n-1$) while maintaining the class-discriminatory information. In general, dimensionality reduction not only helps reduce computational costs for a given classification task, but it can also be helpful to avoid overfitting by minimizing the error in parameter estimation ("curse of dimensionality"). Principal Component Analysis vs. Linear Discriminant Analysis Both **Linear Discriminant Analysis (LDA)** and **Principal Component Analysis (PCA)** are linear transformation techniques that are commonly used for dimensionality reduction. **PCA can be described as an "unsupervised"** algorithm, since it "ignores" class labels and its goal is to find the directions (the so-called principal components) that maximize the variance in a dataset.In contrast to PCA, **LDA is "supervised"** and computes the directions ("linear discriminants") that will represent the axes that maximize the separation between multiple classes.Although it might sound intuitive that LDA is superior to PCA for a multi-class classification task where the class labels are known, this might not always be the case. For example, comparisons between classification accuracies for image recognition after using PCA or LDA show that PCA tends to outperform LDA if the number of samples per class is relatively small ([PCA vs. LDA](http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=908974), A.M. Martinez et al., 2001).In practice, it is also not uncommon to use both LDA and PCA in combination: E.g., PCA for dimensionality reduction followed by an LDA. What is a "good" feature subspace? Let's assume that our goal is to reduce the dimensions of a $d$-dimensional dataset by projecting it onto a $(k)$-dimensional subspace (where $k\;<\;d$). So, how do we know what size we should choose for $k$ ($k$ = the number of dimensions of the new feature subspace), and how do we know if we have a feature space that represents our data "well"?
Later, we will compute eigenvectors (the components) from our data set and collect them in so-called scatter matrices (i.e., the between-class scatter matrix and the within-class scatter matrix). Each of these eigenvectors is associated with an eigenvalue, which tells us about the "length" or "magnitude" of the eigenvector. If we observe that all eigenvalues have a similar magnitude, then this may be a good indicator that our data is already projected on a "good" feature space. In the other scenario, if some of the eigenvalues are much larger than others, we might be interested in keeping only those eigenvectors with the highest eigenvalues, since they contain more information about our data distribution. Vice versa, eigenvalues that are close to 0 are less informative and we might consider dropping those when constructing the new feature subspace. Summarizing the LDA approach in 6 steps Listed below are the 6 general steps for performing a linear discriminant analysis; we will explore them in more detail in the following sections.1. Center points2. Compute the $d$-dimensional mean vectors for the different classes from the dataset.3. Compute the scatter matrices (between-class and within-class scatter matrix).4. Compute the eigenvectors ($\pmb e_1, \; \pmb e_2, \; ..., \; \pmb e_d$) and corresponding eigenvalues ($\pmb \lambda_1, \; \pmb \lambda_2, \; ..., \; \pmb \lambda_d$) for the scatter matrices.5. Sort the eigenvectors by decreasing eigenvalues and choose the $k$ eigenvectors with the largest eigenvalues to form a $d \times k$ dimensional matrix $\pmb W\;$ (where every column represents an eigenvector).6. Use this $d \times k$ eigenvector matrix to transform the samples onto the new subspace. This can be summarized by the matrix multiplication $\pmb Y = \pmb X \times \pmb W$ (where $\pmb X$ is a $n \times d$-dimensional matrix representing the $n$ samples, and $\pmb Y$ are the transformed $n \times k$-dimensional samples in the new subspace). Preparing the sample data set Open dataset
###Code
import seaborn as sns
df = sns.load_dataset("iris")
###Output
_____no_output_____
###Markdown
Exploratory data analysis (EDA)
###Code
sns.pairplot(df, hue="species")
###Output
_____no_output_____
###Markdown
From just looking at these simple graphical representations of the features, we can already tell that the petal lengths and widths are likely better suited as potential features to separate the three flower classes. In practice, instead of reducing the dimensionality via a projection (here: LDA), a good alternative would be a feature selection technique. For low-dimensional datasets like Iris, a glance at those histograms would already be very informative. Another simple, but very useful technique would be to use feature selection algorithms, which will be described in more detail in future lectures. Normality assumptions It should be mentioned that LDA assumes normally distributed data, features that are statistically independent, and identical covariance matrices for every class. However, this only applies to LDA as a classifier, and LDA for dimensionality reduction can also work reasonably well if those assumptions are violated. Even for classification tasks, LDA can be quite robust to the distribution of the data: > "linear discriminant analysis frequently achieves good performances in> the tasks of face and object recognition, even though the assumptions> of common covariance matrix among groups and normality are often> violated (Duda, et al., 2001)" (Tao Li, et al., 2006). Tao Li, Shenghuo Zhu, and Mitsunori Ogihara. "[Using Discriminant Analysis for Multi-Class Classification: An Experimental Investigation](http://link.springer.com/article/10.1007%2Fs10115-006-0013-y)." Knowledge and Information Systems 10, no. 4 (2006): 453-72. Duda, Richard O, Peter E Hart, and David G Stork. 2001. Pattern Classification. New York: Wiley. Preprocessing
###Code
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder  # needed below to encode the species labels
df.head()
X = df[['sepal_length','sepal_width','petal_length','petal_width']].values
y = df['species'].values
enc = LabelEncoder()
enc.fit(y)
y = enc.transform(y)
###Output
_____no_output_____
###Markdown
LDA in 6 steps After we went through several preparation steps, our data is finally ready for the actual LDA. In practice, LDA for dimensionality reduction would be just another preprocessing step for a typical machine learning or pattern classification task. Step 1: Center points
###Code
X = X - X.mean(axis=0)
###Output
_____no_output_____
###Markdown
Step 2: Computing the d-dimensional mean vectors In this step, we start off with a simple computation of the mean vectors $\boldsymbol{\mu}_i$, $(i = 1,2,3)$ of the 3 different flower classes: $\boldsymbol{\mu}_i = \begin{bmatrix} \mu_{\omega_i (\text{sepal length})}\\ \mu_{\omega_i (\text{sepal width})}\\ \mu_{\omega_i (\text{petal length})}\\\mu_{\omega_i (\text{petal width})}\\\end{bmatrix} \; , \quad \text{with} \quad i = 1,2,3$
###Code
np.set_printoptions(precision=4, suppress=True)
mean_vectors = []
for cl in range(0,3):
mean_vectors.append(np.mean(X[y==cl], axis=0))
print('Mean Vector class %s: %s\n' %(cl, mean_vectors[cl]))
###Output
Mean Vector class 0: [-0.8373 0.3707 -2.296 -0.9533]
Mean Vector class 1: [ 0.0927 -0.2873 0.502 0.1267]
Mean Vector class 2: [ 0.7447 -0.0833 1.794 0.8267]
###Markdown
Step 3: Computing the Scatter Matrices Now, we will compute the two *4x4*-dimensional matrices: the within-class and the between-class scatter matrix. 3.1 Within-class scatter matrix $S_W$ The **within-class scatter** matrix $S_W$ is computed by the following equation: $S_W = \sum\limits_{i=1}^{c} S_i$where $S_i = \sum\limits_{\pmb x \in D_i}^n (\pmb x - \pmb \mu_i)\;(\pmb x - \pmb \mu_i)^T$ (scatter matrix for every class) and $\pmb \mu_i$ is the mean vector $\pmb \mu_i = \frac{1}{n_i} \sum\limits_{\pmb x \in D_i}^n \; \pmb x$
###Code
S_W = np.zeros((4,4))
for cl,mv in zip(range(0,3), mean_vectors):
class_sc_mat = np.zeros((4,4)) # scatter matrix for every class
for row in X[y == cl]:
row, mv = row.reshape(4,1), mv.reshape(4,1) # make column vectors
class_sc_mat += (row-mv).dot((row-mv).T)
S_W += class_sc_mat # sum class scatter matrices
print('within-class Scatter Matrix:\n', S_W)
###Output
within-class Scatter Matrix:
[[38.9562 13.63 24.6246 5.645 ]
[13.63 16.962 8.1208 4.8084]
[24.6246 8.1208 27.2226 6.2718]
[ 5.645 4.8084 6.2718 6.1566]]
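###Markdown
As a small optional cross-check, the same within-class scatter matrix can be obtained in vectorized form from the per-class covariance matrices, since each $S_i$ equals $(n_i - 1)$ times the sample covariance of class $i$. A minimal sketch using only the `X`, `y` and `S_W` defined above:
###Code
# the sum of (n_i - 1) * per-class covariance matrices reproduces the loop-based S_W
S_W_check = np.zeros((4,4))
for cl in range(0,3):
    n_cl = X[y==cl].shape[0]
    S_W_check += (n_cl - 1) * np.cov(X[y==cl].T)
np.testing.assert_array_almost_equal(S_W, S_W_check)
print('vectorized S_W matches the loop-based computation')
###Output
_____no_output_____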
###Markdown
3.2 Between-class scatter matrix $S_B$ The **between-class scatter** matrix $S_B$ is computed by the following equation: $S_B = \sum\limits_{i=1}^{c} N_{i} (\pmb \mu_i - \pmb \mu) (\pmb \mu_i - \pmb \mu)^T$where $\pmb \mu$ is the overall mean, and $\pmb \mu_{i}$ and $N_{i}$ are the sample mean and sizes of the respective classes.
###Code
overall_mean = np.mean(X, axis=0)
S_B = np.zeros((4,4))
for i,mean_vec in enumerate(mean_vectors):
n = X[y==i,:].shape[0]
mean_vec = mean_vec.reshape(4,1) # make column vector
overall_mean = overall_mean.reshape(4,1) # make column vector
S_B += n * (mean_vec - overall_mean).dot((mean_vec - overall_mean).T)
print('between-class Scatter Matrix:\n', S_B)
###Output
between-class Scatter Matrix:
[[ 63.2121 -19.9527 165.2484 71.2793]
[-19.9527 11.3449 -57.2396 -22.9327]
[165.2484 -57.2396 437.1028 186.774 ]
[ 71.2793 -22.9327 186.774 80.4133]]
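###Markdown
Another quick consistency check, using only the matrices computed above: the total scatter matrix (computed from all samples regardless of class) should decompose exactly into $S_W + S_B$.
###Code
# total scatter equals within-class plus between-class scatter
S_T = (X.shape[0] - 1) * np.cov(X.T)
np.testing.assert_array_almost_equal(S_T, S_W + S_B)
print('S_T = S_W + S_B confirmed')
###Output
_____no_output_____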
###Markdown
Step 4: Solving the generalized eigenvalue problem for the matrix $S_{W}^{-1}S_B$ Next, we will solve the generalized eigenvalue problem for the matrix $S_{W}^{-1}S_B$ to obtain the linear discriminants.
###Code
eig_vals, eig_vecs = np.linalg.eig(np.linalg.inv(S_W).dot(S_B))
for i in range(len(eig_vals)):
#eigvec_sc = eig_vecs[:,i].reshape(4,1)
print('\nEigenvector {}: \n{}'.format(i+1, eig_vecs[:,i].real))
print('Eigenvalue {:}: {:.2e}'.format(i+1, eig_vals[i].real))
###Output
Eigenvector 1:
[-0.2087 -0.3862 0.554 0.7074]
Eigenvalue 1: 3.22e+01
Eigenvector 2:
[-0.0065 -0.5866 0.2526 -0.7695]
Eigenvalue 2: 2.85e-01
Eigenvector 3:
[ 0.8825 -0.2639 -0.2357 -0.31 ]
Eigenvalue 3: 4.76e-15
Eigenvector 4:
[-0.2844 0.4123 0.4716 -0.7258]
Eigenvalue 4: -8.19e-15
###Markdown
After this decomposition of our square matrix into eigenvectors and eigenvalues, let us briefly recapitulate how we can interpret those results. As we remember from our first linear algebra class, both eigenvectors and eigenvalues provide us with information about the distortion of a linear transformation: the eigenvectors are basically the direction of this distortion, and the eigenvalues are the scaling factors for the eigenvectors describing the magnitude of the distortion. If we are performing the LDA for dimensionality reduction, the eigenvectors are important since they will form the new axes of our new feature subspace; the associated eigenvalues are of particular interest since they will tell us how "informative" the new "axes" are. Let us briefly double-check our calculation and talk more about the eigenvalues in the next section. Checking the eigenvector-eigenvalue calculation A quick check that the eigenvector-eigenvalue calculation is correct and satisfies the equation: $\pmb A\pmb{v} = \lambda\pmb{v}$ where $\pmb A = S_{W}^{-1}S_B\\\pmb{v} = \; \text{Eigenvector}\\\lambda = \; \text{Eigenvalue}$
###Code
for i in range(len(eig_vals)):
eigv = eig_vecs[:,i].reshape(4,1)
np.testing.assert_array_almost_equal(np.linalg.inv(S_W).dot(S_B).dot(eigv),
eig_vals[i] * eigv,
decimal=6, err_msg='', verbose=True)
print('ok')
###Output
ok
###Markdown
Step 5: Selecting linear discriminants for the new feature subspace 5.1. Sorting the eigenvectors by decreasing eigenvalues Remember from the introduction that we are not only interested in merely projecting the data into a subspace that improves the class separability, but also in reducing the dimensionality of our feature space (where the eigenvectors will form the axes of this new feature subspace). However, the eigenvectors only define the directions of the new axes, since they all have unit length 1. So, in order to decide which eigenvector(s) we want to drop for our lower-dimensional subspace, we have to take a look at the corresponding eigenvalues of the eigenvectors. Roughly speaking, the eigenvectors with the lowest eigenvalues bear the least information about the distribution of the data, and those are the ones we want to drop. The common approach is to rank the eigenvectors from highest to lowest corresponding eigenvalue and choose the top $k$ eigenvectors.
###Code
i = np.flip(np.argsort(eig_vals))
eig_vals=np.abs(eig_vals[i])
eig_vecs=eig_vecs[:,i]
eig_vals
print('Variance explained:\n')
eigv_sum = np.sum(eig_vals)
var_expl = eig_vals/eigv_sum
var_expl
###Output
Variance explained:
###Markdown
If we take a look at the eigenvalues, we can already see that 2 eigenvalues are close to 0. These eigenvalues carry (almost) no information; that they are not exactly 0 is only due to floating-point imprecision. In fact, these two last eigenvalues should be exactly zero: in LDA, the number of linear discriminants is at most $c-1$ where $c$ is the number of class labels, since the between-class scatter matrix $S_B$ is the sum of $c$ matrices with rank 1 or less. Note that in the rare case of perfect collinearity (all sample points fall on a straight line), the covariance matrix would have rank one, which would result in only one eigenvector with a nonzero eigenvalue. The first eigenpair is by far the most informative one, and we won't lose much information if we form a 1D feature space based on this eigenpair. 5.2. Choosing *k* eigenvectors with the largest eigenvalues After sorting the eigenpairs by decreasing eigenvalues, it is now time to construct our $d \times k$-dimensional eigenvector matrix $\pmb W$ (here $4 \times 2$: based on the 2 most informative eigenpairs) and thereby reduce the initial 4-dimensional feature space into a 2-dimensional feature subspace.
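A quick numerical sanity check of this rank argument is included below: $S_B$ is a sum of $c$ rank-one terms whose weighted mean differences sum to zero, so its rank is at most $c-1 = 2$.
###Code
# the between-class scatter matrix has rank at most c-1 = 2,
# which is why only two eigenvalues are meaningfully different from zero
print('rank of S_B:', np.linalg.matrix_rank(S_B))
print('rank of S_W^-1 S_B:', np.linalg.matrix_rank(np.linalg.inv(S_W).dot(S_B)))
###Output
_____no_output_____
###Markdown
With the two informative eigenpairs identified, the $4 \times 2$ projection matrix $\pmb W$ is assembled next.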
###Code
W = eig_vecs[:,0:2]
W
###Output
_____no_output_____
###Markdown
Step 6: Transforming the samples onto the new subspace In the last step, we use the $4 \times 2$-dimensional matrix $\pmb W$ that we just computed to transform our samples onto the new subspace via the equation $\pmb Y = \pmb X \times \pmb W $.(where $\pmb X$ is a $n \times d$-dimensional matrix representing the $n$ samples, and $\pmb Y$ are the transformed $n \times k$-dimensional samples in the new subspace).
###Code
X_lda = X.dot(W)
np.shape(X_lda)
###Output
_____no_output_____
###Markdown
The scatter plot below represents our new feature subspace that we constructed via LDA. We can see that the first linear discriminant "LD1" separates the classes quite nicely. However, the second discriminant, "LD2", does not add much valuable information, which we had already concluded when we looked at the ranked eigenvalues in step 4.
###Code
plt.scatter(X_lda[:,0], X_lda[:,1], c=y, cmap='viridis')
###Output
_____no_output_____
###Markdown
LDA via scikit-learn Now, after we have seen how a Linear Discriminant Analysis works using a step-by-step approach, there is also a more convenient way to achieve the same via the `LDA` class implemented in the [`scikit-learn`](http://scikit-learn.org/stable/) machine learning library.
###Code
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
# LDA
lda = LinearDiscriminantAnalysis(n_components=2)
lda.fit(X, y)
X_lda = lda.transform(X)
plt.scatter(X_lda[:,0], X_lda[:,1], c=y, cmap='viridis')
###Output
_____no_output_____
###Markdown
A comparison of PCA and LDA In order to compare the feature subspace that we obtained via the Linear Discriminant Analysis, we will use the `PCA` class from the `scikit-learn` machine-learning library. The documentation can be found here: [https://scikit-learn.org/stable/auto_examples/decomposition/plot_pca_vs_lda.html#sphx-glr-auto-examples-decomposition-plot-pca-vs-lda-py](https://scikit-learn.org/stable/auto_examples/decomposition/plot_pca_vs_lda.html#sphx-glr-auto-examples-decomposition-plot-pca-vs-lda-py). For our convenience, we can directly specify how many components we want to retain in our input dataset via the `n_components` parameter.
###Code
from sklearn.decomposition import PCA
# PCA
pca = PCA(n_components=2)
pca.fit(X, y)
X_pca = pca.transform(X)
plt.scatter(X_pca[:,0], X_pca[:,1], c=y, cmap='viridis')
###Output
_____no_output_____ |
finlab/u12_svm.ipynb | ###Markdown
Support Vector Machine (SVM) features
###Code
import finlab.ml as ml
dataset = ml.fundamental_features()
dataset.head()
dataset.columns
features = ['R103_ROE稅後', 'R402_營業毛利成長率']
dataset = dataset[features].dropna(how='any')
dataset.head()
###Output
_____no_output_____
###Markdown
add prediction
###Code
ml.add_profit_prediction(dataset)
dataset.head()
%matplotlib inline
dataset.plot.scatter(features[0], features[1])
###Output
_____no_output_____
###Markdown
remove outliers
###Code
def is_valid(feature, nstd):
ub = feature.mean() + nstd * feature.std()
lb = feature.mean() - nstd * feature.std()
return (feature > lb) & (feature <ub)
valid = is_valid(dataset['R103_ROE稅後'], 2) & is_valid(dataset['R402_營業毛利成長率'], 0.05)
dataset_rmoutliers = dataset[valid].dropna()
dataset_rmoutliers['R103_ROE稅後'].hist(bins=100)
#dataset_rmoutliers['R402_營業毛利成長率'].hist(bins=100)
###Output
_____no_output_____
###Markdown
Scale features
###Code
import pandas as pd
import sklearn.preprocessing as preprocessing
dataset_scaled = pd.DataFrame(preprocessing.scale(dataset_rmoutliers), index=dataset_rmoutliers.index, columns=dataset_rmoutliers.columns)
dataset_scaled.head()
dataset_scaled['R103_ROE稅後'].hist(bins=100)
dataset_scaled['R402_營業毛利成長率'].hist(bins=100, alpha=0.5)
dataset_scaled['return'] = dataset_rmoutliers['return']
###Output
_____no_output_____
###Markdown
Training
###Code
from sklearn.model_selection import train_test_split
dataset_train, dataset_test = train_test_split(dataset_scaled, test_size=0.1, random_state=0)
from sklearn.svm import SVC
cf = SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape='ovr', degree=3, gamma='auto_deprecated',
kernel='linear', max_iter=-1, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
cf.fit(dataset_train[features], dataset_train['return'] > dataset_train['return'].quantile(0.5))
from mlxtend.plotting import plot_decision_regions
features_plot = dataset_test[features].values
labels_plot = (dataset_test['return'] > dataset_test['return'].quantile(0.5)).astype(int).values
plot_decision_regions(features_plot, labels_plot, cf)
###Output
_____no_output_____
###Markdown
backtest
###Code
history = dataset_test.copy()
history['svm prediction'] = cf.predict(dataset_test[features])
history = history.reset_index()
dates = sorted(list(set(history['date'])))
seasonal_returns1 = []
seasonal_returns2 = []
for date in dates:
current_stocks = history[history['date'] == date]
buy_stocks = current_stocks[current_stocks['svm prediction'] == True]
sell_stocks = current_stocks[current_stocks['svm prediction'] == False]
seasonal_return1 = buy_stocks['return'].mean()
seasonal_returns1.append(seasonal_return1)
seasonal_return2 = sell_stocks['return'].mean()
seasonal_returns2.append(seasonal_return2)
import matplotlib.pyplot as plt
plt.style.use("ggplot")
pd.Series(seasonal_returns1, index=dates).cumprod().plot(color='red')
pd.Series(seasonal_returns2, index=dates).cumprod().plot(color='blue')
###Output
_____no_output_____ |
2021/Cviceni 2.ipynb | ###Markdown
Root finding Implement root finding using the bisection (interval-halving) method. Find the root of sin(x) between 3 and 4 using the bisection method.
###Code
import numpy as np
import matplotlib.pyplot as plt
a, b = 3, 4
f = np.sin
def bisect(f, a, b, maxiter=53):
    # task: define the body of this function
if np.sign(f(a))*np.sign(f(b)) >= 0:
raise(ValueError("Function sign must differ at a and b"))
for i in range(maxiter):
m = (a+b)/2.
fm = f(m)
if m in [a, b] or fm == 0:
# floating point tolerance reached or exact solution found
return m
if fm*np.sign(f(a)) < 0:
b = m
elif fm*np.sign(f(b)) < 0:
a = m
return m
bisect(f, 3, 4)
###Output
_____no_output_____
###Markdown
Newton's method
###Code
def newton(f, df, a):
for i in range(10):
a_new = a - f(a)/df(a)
if a_new == a:
return a
a = a_new
df = np.cos
newton(f, df, 4.8), newton(f, df, 4.)
###Output
_____no_output_____
###Markdown
Minimization Task 1: implement minimum finding using the golden-section method (interval division). Find the minimum of `cos(x)` on the interval `[2, 4]`. Track the rate of convergence, i.e. the accuracy of the estimated minimum position `xmin` and of the function value `cos(xmin)` at the minimum, as a function of the number of iterations. To what accuracy can these quantities be determined? Then find the minimum of `1 + (x-0.1)**4` on the interval `[-1, 1]` and assess the accuracy of the minimum in the same way.
###Code
def golden_min(f, a, b, tol=1e-5):
iphi = 2/(np.sqrt(5) + 1) # 0.618...
approximations = [[a, b]]
c = b - (b - a) * iphi
d = a + (b - a) * iphi
while np.abs(b - a) > tol:
if f(c) < f(d):
b = d
else:
a = c
approximations.append([a,b])
c = b - (b - a) * iphi
d = a + (b - a) * iphi
return (b + a) / 2, np.array(approximations)
f = np.cos
xmin, approx = golden_min(f, 3, 4, tol=1e-14)
plt.plot(np.arange(approx.shape[0]), approx[:,0])
plt.plot(np.arange(approx.shape[0]), approx[:,1])
plt.plot(np.arange(approx.shape[0]), np.abs(approx[:,0]-np.pi))
plt.plot(np.arange(approx.shape[0]), np.abs(approx[:,1]-np.pi))
plt.plot(np.arange(approx.shape[0]), np.abs(f(approx[:,0])+1), "--")
plt.plot(np.arange(approx.shape[0]), np.abs(f(approx[:,1])+1), "--")
plt.grid()
plt.gca().set_yscale("log")
xmin - np.pi # ~sqrt(eps)
f = lambda x: (x-0.1)**4
true_min = 0.1
x = np.linspace(-1, 1)
plt.plot(x, f(x))
xmin, approx = golden_min(f, -1, 1, tol=1e-14)
plt.plot(np.arange(approx.shape[0]), np.abs(approx[:,0]-true_min))
plt.plot(np.arange(approx.shape[0]), np.abs(approx[:,1]-true_min))
plt.plot(np.arange(approx.shape[0]), np.abs(f(approx[:,0])-f(true_min)), "--")
plt.plot(np.arange(approx.shape[0]), np.abs(f(approx[:,1])-f(true_min)), "--")
plt.grid()
plt.gca().set_yscale("log")
xmin
###Output
_____no_output_____
###Markdown
A demonstration of Newton's method for minimization
###Code
def newton_min(f, df, ddf, a):
for i in range(10):
a_new = a - df(a)/ddf(a)
if a_new == a:
if ddf(a) > 0:
return a
else:
raise(RuntimeError("Method did not converge to minimum"))
a = a_new
def f(x): return x**2 + x
def df(x): return 2*x + 1
def ddf(x): return 2
newton_min(f, df, ddf, 1)
x = np.linspace(-2, 2)
plt.plot(x, f(x))
plt.ylim(ymax=2)
plt.grid()
###Output
_____no_output_____ |
14-.ipynb | ###Markdown
${{ \left\Vert {X} \right\Vert }\mathop{{}}\nolimits_{{0}}}$ L0 norm: the number of non-zero elements of the vector x. ${{ \left\Vert {X} \right\Vert }\mathop{{}}\nolimits_{{1}}}$ L1 norm: the sum of the absolute values of the elements of x. ${{ \left\Vert {X} \right\Vert }\mathop{{}}\nolimits_{{2}}}$ L2 norm (spectral norm): for a vector, the square root of the sum of squared elements; for a matrix, the square root of the largest eigenvalue of $A^{T}A$, i.e. $max(sqrt(eig(A^{T}A)))$. ${{ \left\Vert {X} \right\Vert }\mathop{{}}\nolimits_{{F}}}$ Frobenius norm: the square root of the sum of the squares of all entries of a matrix (a measure of the overall size of the matrix). ${{ \left\Vert {X} \right\Vert }\mathop{{}}\nolimits_{{\infty}}}$ Infinity norm (max norm): the maximum absolute value of the vector elements. ${{ \left\Vert {X} \right\Vert }\mathop{{}}\nolimits_{{*}}}$ Nuclear norm: the sum of the singular values of a matrix (used to encourage low-rank matrices).
Subspace clustering [Subspace clustering](https://towardsdatascience.com/subspace-clustering-7b884e8fff73) This tutorial addresses the following questions: 1. What makes high-dimensional data hard to handle? + What is subspace clustering? + How can subspace clustering be done in Python? High-dimensional data has tens up to thousands of dimensions, for example OD (origin-destination) data. Handling high-dimensional data is difficult: 1. It is hard to visualize and to understand what the data looks like, so dimensionality reduction is needed; this also brings the curse of dimensionality, since enumerating every subset of dimensions is infeasible. + The choice of dimensionality-reduction technique strongly influences the quality of the subsequent clustering. + Many dimensions may be irrelevant and can mask the existing clusters in noisy data. + A common remedy is feature selection (removing irrelevant dimensions), but in some situations identifying the redundant dimensions is not easy.
What is subspace clustering? > Subspace clustering is a technique that discovers clusters within different subspaces. *A subspace is a combination of one or more dimensions of the data.* The basic assumption is that all clusters can be found using only subsets of the dimensions (consistency across all N features is not required). > **Example:** given patients' gene data (each patient has 20,000 gene attributes, so the data has 20,000 dimensions), suppose a group of patients has Parkinson's disease and this can be recognized from only 100 genes; we then say this sub-cluster lives in a 100-dimensional subspace. In other words, subspace clustering extends traditional N-dimensional cluster analysis by clustering over **rows** and **columns** at the same time (traditional clustering only groups the rows, while subspace clustering groups rows and columns simultaneously).
Subspace clusters may overlap in both the attributes (rows) and the observations (columns), as illustrated in the figure from this [paper](https://www.kdd.org/exploration_files/parsons.pdf). Note that the data can be grouped into 4 clusters; elements of two clusters can lie far apart without disturbing the subspace clustering, whereas traditional clustering methods are easily disturbed by this. In the data shown below, looking at any single dimension reveals that, in every dimension, points from different clusters are mixed together.
Types of subspace clustering: based on the search strategy, two families of subspace-clustering algorithms can be distinguished: 1. Bottom-up algorithms start from low-dimensional (1D) clusters and gradually merge them to handle higher dimensions. + Top-down algorithms start from the full set of dimensions, find clusters, and then evaluate the subspace of each cluster. The figure below shows common subspace-clustering algorithms.
The CLIQUE algorithm. Briefly, the algorithm proceeds as follows: > For each dimension (attribute, feature) the space is split into nBins cells (the first parameter) and a histogram (point count) is computed for each cell. Only the dense units are kept, i.e. cells whose count exceeds a given threshold nPoints (the second parameter). Each dense unit carries the following attributes: 1. the dimension (attribute, feature) it belongs to 2. the index of its cell 3. the data points inside the cell [Code](https://github.com/ciortanmadalina/medium/blob/master/clique_clustering.ipynb)
(dense units)๏ผๅณๆ ผๅญ้ๆฐๆฎ้ๅคงไบๆไธช็ปๅฎ็้ๅผnPoints๏ผ็ฌฌไบไธชๅๆฐ๏ผใๆฏไธชdense unitsๅๅธฆ็ๅฑๆง๏ผ1. ๅฎๆๅจ็็ปดๅบฆ(ๅฑๆง๏ผfeature)2. ๆ ผๅญ็็ผๅท3. ๆ ผๅญไธญ็ๆฐๆฎ [ไปฃ็ ](https://github.com/ciortanmadalina/medium/blob/master/clique_clustering.ipynb)
###Code
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
from sklearn import preprocessing
import matplotlib.pyplot as plt
n_components = 4
data, truth = make_blobs(n_samples=100, centers=n_components, random_state=42, n_features=2)
data = preprocessing.MinMaxScaler().fit_transform(data)
plt.scatter(data[:, 0], data[:, 1], s=50, c = truth)
plt.title(f"Example of a mixture of {n_components} distributions")
plt.xlabel("Feature 1")
plt.ylabel("Feature 2");
###Output
_____no_output_____
###Markdown
For this dataset there are 2 dimensions and 4 clusters; the algorithm parameters are chosen as nBins = 8 and nPoints = 2. The algorithm follows the bottom-up idea and starts in 1D. If 2 or more dense units are neighbours, they are merged into one larger bin. This operation becomes easy once the cells are turned into a graph: every dense unit is a node, and an edge is added between two nodes if they belong to the same dimension and are adjacent (their distance is no larger than 1). In the example, the horizontal dimension separates two clusters and the vertical dimension separates three clusters. Next, we go from 2D up to all D dimensions and compute all feasible clusters. This amounts to combining the dense units dimension by dimension and keeping only the overlapping, non-disjoint dense bins; after the (k-1)-dimensional dense units have been computed, dimension k is reached by combining all (k-1)-dimensional dense units. For the data above this yields the clustering result shown below. The purple points do not belong to any cluster, because the cells they fall into contain fewer than 2 points (nPoints). CLIQUE is very sensitive to the choice of its parameters (nBins and nPoints), but it is the most basic algorithm of the bottom-up family.
Subspace clustering based on spectral clustering. In fact, spectral clustering already performs a form of subspace clustering. Recall the last step of spectral clustering: the $L$ matrix has $n$ dimensions, but kmeans is run only on the eigenvectors corresponding to the smallest $k$ eigenvalues, so only $k$ dimensions are considered, which is exactly a subspace. As discussed earlier, spectral clustering needs an affinity matrix, denoted $W$ or $A$.
Self-expressiveness affinity (an affinity matrix based on self-expressiveness). Concept: a data point $x_i$ drawn from a linear subspace can be represented as a linear combination of the other points in the same subspace. Stacking all points into the data matrix $X$, self-expressiveness can be written simply as $$X = XC$$ where $C$ is the self-expressiveness coefficient matrix, $X$ is $(n*m)$, $C$ is $(m*m)$ and $XC$ is $(n*m)$, with $n$ the number of records and $m$ the dimension of each record. Taking an OD (origin-destination) matrix as an example, $n$ is the number of OD pairs and $m$ is time, i.e.
|X matrix|Day 1|Day 2|...|Day m|
|----|----|----|----|----|
|A to B|20|30|...|40|
|A to C|20|30|...|40|
|...|...|...|...|...|
|n|20|30|...|40|
|C matrix|Day 1|Day 2|...|Day m|
|----|----|----|----|----|
|Day 1|0|similarity|...|similarity|
|Day 2|similarity|0|...|similarity|
|...|...|...|...|...|
|Day m|similarity|similarity|...|0|
Assuming the subspaces are independent, minimizing a norm of $C$ guarantees that, when the samples are arranged in a suitable order, $C$ has a block-diagonal structure, so $C$ can be used to build the affinity matrix for spectral clustering [P. Ji, M. Salzmann, and H. Li. Efficient dense subspace clustering. In WACV, pages 461-468. IEEE, 2014.]. Mathematically: $$min{{ \left\Vert {C} \right\Vert }_{p}}$$ $$s.t.\ diag(C)=0$$ $$X = XC$$ $$C\geqslant0$$ where ${ \left\Vert {C} \right\Vert }_{p}$ is any matrix norm of $C$. Once such a $C$ is found, the affinity matrix $A$ required by spectral clustering can be built as $$A=C^T+C$$ (See the summary of common matrix norms at the top of this notebook.)
Real datasets contain noise, so the problem is adjusted to $$min{{ \left\Vert {C} \right\Vert }\mathop{{}}\nolimits_{{F}}}+\frac{\lambda}{2}{{ \left\Vert {X-XC} \right\Vert }\mathop{{}}\nolimits_{{F}}}^2$$ $$s.t.\ diag(C)=0$$ $$C\geqslant0$$ Once such a $C$ is found, spectral clustering can be applied. This objective pursues two goals at once: 1. ${ \left\Vert {C} \right\Vert }\mathop{{}}\nolimits_{{F}}$ is minimized, which enforces the definition of the self-expressiveness coefficient matrix; 2. ${ \left\Vert {X-XC} \right\Vert }\mathop{{}}\nolimits_{{F}}$ is minimized, where $XC$ is the reconstructed matrix and $X-XC$ is the noise matrix, so the noise is minimized. The objective therefore performs denoising and dimensionality reduction at the same time. So how is the $C$ matrix obtained? Deep Adversarial Subspace Clustering (a 2018 machine-learning paper) [Reading notes: Deep Adversarial Subspace Clustering](https://www.cnblogs.com/EstherLjy/p/9840016.html) [Deep-subspace-clustering-networks github](https://github.com/panji1990/Deep-subspace-clustering-networks) The proposed solution is to build a Deep Convolutional Auto-Encoder; its loss function is given as an image that is not reproduced here.
###Code
import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision
import matplotlib.pyplot as plt
EPOCH = 100
BATCH_SIZE = 500
LR = 0.01
DOWNLOAD_MNIST = False
train_data = torchvision.datasets.MNIST(
root = './mnist',
train = True,
    transform = torchvision.transforms.ToTensor(), # scale pixel values from 0-255 down to 0-1
download =DOWNLOAD_MNIST
)
# first convert the tensors into a Dataset that torch can work with
torch_dataset = Data.TensorDataset(train_data.train_data[:BATCH_SIZE], train_data.train_labels[:BATCH_SIZE])
# wrap the dataset in a DataLoader
loader = Data.DataLoader(
    dataset=torch_dataset,      # torch TensorDataset format
    batch_size=BATCH_SIZE,      # mini batch size
    shuffle=False,              # whether to shuffle the data between epochs
    num_workers=2,              # number of worker processes for loading data
)
i = 0
def printdata(x,y):
plt.imshow(x,cmap = 'gray')
plt.title(y)
plt.show()
printdata(train_data.train_data[i],str(train_data.train_labels[i].numpy()))
import numpy as np
class DSC(nn.Module):
def __init__(self):
super().__init__()
self.encoder = nn.Sequential(
nn.Linear(28*28,128),
nn.Tanh(),
nn.Linear(128,64),
nn.Tanh(),
nn.Linear(64,12),
nn.Tanh(),
nn.Linear(12,5),
)
self.selfexpr = nn.Linear(BATCH_SIZE,BATCH_SIZE,bias=False)
self.decoder = nn.Sequential(
nn.Linear(5,12),
nn.Tanh(),
nn.Linear(12,64),
nn.Tanh(),
nn.Linear(64,128),
nn.Tanh(),
nn.Linear(128,28*28),
nn.Sigmoid()
)
def forward(self,x):
z = self.encoder(x)
z = torch.transpose(z, 1, 0)
z_ = self.selfexpr(z)
z_ = torch.transpose(z_, 1, 0)
x_ = self.decoder(z_)
return z,z_,x_
dsc = DSC()
dsc.cuda()
optimizer = torch.optim.Adam(dsc.parameters(),lr = LR,betas = (0.9,0.99))
loss_func = torch.nn.MSELoss()
loss_list = []
lambda1 = 1
# `epoch` is only defined inside the training loop below, so 10**(epoch/10-3) cannot be
# evaluated at this point; use a fixed weight instead (the fine-tuning cell further down
# uses 10**(K/10-3) with K = 10, i.e. 1e-2)
lambda2 = 1e-2
steps = []
for epoch in range(1000): # iterate over the data many times
    for step, (batch_x, batch_y) in enumerate(loader): # the loader yields one mini-batch per step
steps.append(step)
batch_x = (batch_x.view(-1,28*28).type(torch.FloatTensor)/255).cuda()
batch_y = batch_y.cuda()
z,z_,x_ = dsc(batch_x)
C = dsc.selfexpr.state_dict()['weight']
loss = 1/2*torch.norm(x_-batch_x)**2+lambda1*torch.norm(C)+lambda2/2*torch.norm(torch.transpose(z, 1, 0)-z_)**2
loss_list.append(loss)
        optimizer.zero_grad() # reset the gradients
        loss.backward()       # backpropagate to compute gradients
        optimizer.step()      # update the parameters
import IPython
IPython.display.clear_output(wait=True)
plt.plot(range(len(steps[-50:])),loss_list[-50:])
plt.ylabel('loss')
plt.xlabel('train step')
plt.show()
C = dsc.selfexpr.state_dict()['weight'].cpu().numpy()
plt.imshow(C,cmap = 'Greys')
plt.show()
for epoch in range(1000): # continue training for more passes over the data
    for step, (batch_x, batch_y) in enumerate(loader): # the loader yields one mini-batch per step
steps.append(step)
batch_x = (batch_x.view(-1,28*28).type(torch.FloatTensor)/255).cuda()
batch_y = batch_y.cuda()
z,z_,x_ = dsc(batch_x)
C = dsc.selfexpr.state_dict()['weight']
loss = 1/2*torch.norm(x_-batch_x)**2+lambda1*torch.norm(C)+lambda2/2*torch.norm(torch.transpose(z, 1, 0)-z_)**2
loss_list.append(loss)
        optimizer.zero_grad() # reset the gradients
        loss.backward()       # backpropagate to compute gradients
        optimizer.step()      # update the parameters
import IPython
IPython.display.clear_output(wait=True)
plt.plot(range(len(steps[-50:])),loss_list[-50:])
plt.ylabel('loss')
plt.xlabel('train step')
plt.show()
i = 10
def printdata(x,y):
plt.imshow(x,cmap = 'gray')
plt.title(y)
plt.show()
printdata(x_[i].view(28,28).cpu().data.numpy(),'test')
def printdata(x,y):
plt.imshow(x,cmap = 'gray')
plt.title(y)
plt.show()
printdata(batch_x[i].view(28,28).cpu().data.numpy(),'test')
A = C+C.T
plt.imshow(A,cmap = 'Greys')
plt.show()
D = np.diag(A.sum(axis=1))
L = D - A
# compute the eigenvalues and eigenvectors of the Laplacian
vals, vecs = np.linalg.eig(L)
# sort eigenvectors by ascending eigenvalue
vecs = vecs[:,np.argsort(vals)]
vals = vals[np.argsort(vals)]
n = 10
# run kmeans on the n leading eigenvectors to form n clusters
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=n)
kmeans.fit(vecs[:,:n])
spectral_labels = kmeans.labels_
spectral_labels
batch_y
###Output
_____no_output_____
###Markdown
The experiment begins! Pre-training strategies
###Code
import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision
import matplotlib.pyplot as plt
EPOCH = 2
BATCH_SIZE = 50
LR = 0.01
DOWNLOAD_MNIST = False
train_data = torchvision.datasets.MNIST(
root = './mnist',
train = True,
    transform = torchvision.transforms.ToTensor(), # scale pixel values from 0-255 down to 0-1
download =DOWNLOAD_MNIST
)
# first convert the tensors into a Dataset that torch can work with
torch_dataset = Data.TensorDataset(train_data.train_data, train_data.train_labels)
# wrap the dataset in a DataLoader
loader = Data.DataLoader(
    dataset=torch_dataset,      # torch TensorDataset format
    batch_size=BATCH_SIZE,      # mini batch size
    shuffle=True,               # shuffle the data between epochs
    num_workers=2,              # number of worker processes for loading data
)
class AutoEncoder(nn.Module):
def __init__(self):
super().__init__()
self.encoder = nn.Sequential(
nn.Linear(28*28,128),
nn.Tanh(),
nn.Linear(128,64),
nn.Tanh(),
nn.Linear(64,12),
nn.Tanh(),
nn.Linear(12,10),
)
self.decoder = nn.Sequential(
nn.Linear(10,12),
nn.Tanh(),
nn.Linear(12,64),
nn.Tanh(),
nn.Linear(64,128),
nn.Tanh(),
nn.Linear(128,28*28),
nn.Sigmoid()
)
def forward(self,x):
encoded = self.encoder(x)
decoded = self.decoder(encoded)
return encoded,decoded
autoencoder = AutoEncoder()
autoencoder.cuda()
optimizer = torch.optim.Adam(autoencoder.parameters(),lr = LR,betas = (0.9,0.99))
loss_func = torch.nn.MSELoss()
loss_list = []
for epoch in range(EPOCH): # iterate over the whole dataset EPOCH times
    for step, (batch_x, batch_y) in enumerate(loader): # the loader yields one mini-batch per step
batch_x = (batch_x.view(-1,28*28).type(torch.FloatTensor)/255).cuda()
batch_y = batch_y.cuda()
encoded,decoded = autoencoder(batch_x)
loss = loss_func(decoded,batch_x)
loss_list.append(loss)
        optimizer.zero_grad() # reset the gradients
        loss.backward()       # backpropagate to compute gradients
        optimizer.step()      # update the parameters
import IPython
IPython.display.clear_output(wait=True)
plt.ylabel('loss')
plt.xlabel('train step')
plt.plot(range(len(loss_list)),loss_list,label='Adam')
plt.legend()
plt.show()
def printdata(x,y):
plt.imshow(x,cmap = 'gray')
plt.title(y)
plt.show()
for i in range(3):
printdata(decoded[i].view(28,28).cpu().data.numpy(),str(batch_y[i].cpu().numpy()))
printdata(batch_x[i].view(28,28).cpu().numpy(),str(batch_y[i].cpu().numpy()))
###Output
_____no_output_____
###Markdown
Fine-tuning strategies: lock (freeze) the pretrained parameters
###Code
import numpy as np
latent_features = 10
class DSC(nn.Module):
def __init__(self,autoencoder,BATCH_SIZE):
super().__init__()
self.encoder = nn.Sequential(
nn.Linear(28*28,128),
nn.Tanh(),
nn.Linear(128,64),
nn.Tanh(),
nn.Linear(64,12),
nn.Tanh(),
nn.Linear(12,10),
)
self.encoder.load_state_dict(autoencoder.encoder.state_dict())
kill_matrix = np.ones((BATCH_SIZE,BATCH_SIZE))
for i in range(BATCH_SIZE):
kill_matrix[i][i]=0
self.k = torch.tensor(kill_matrix,dtype=torch.float, requires_grad=False)
self.m = torch.zeros([BATCH_SIZE,BATCH_SIZE],dtype=torch.float ,requires_grad=True)
self.decoder = nn.Sequential(
nn.Linear(10,12),
nn.Tanh(),
nn.Linear(12,64),
nn.Tanh(),
nn.Linear(64,128),
nn.Tanh(),
nn.Linear(128,28*28),
nn.Sigmoid()
)
self.decoder.load_state_dict(autoencoder.decoder.state_dict())
        # freeze these layers so the pretrained encoder and decoder are not trained further
frozen_layers = [self.encoder, self.decoder,]
for layer in frozen_layers:
for name, value in layer.named_parameters():
value.requires_grad = False
def forward(self,x):
z = self.encoder(x)
z = torch.transpose(z, 1, 0)
c = self.k.mul(self.m)
z_ = z.mm(c)
x_ = torch.transpose(z_, 1, 0)
x_ = self.decoder(x_)
return z,z_,x_,c
###Output
_____no_output_____
###Markdown
Start training
###Code
DOWNLOAD_MNIST = False
train_data = torchvision.datasets.MNIST(
root = './mnist',
train = True,
    transform = torchvision.transforms.ToTensor(), # scale pixel values from 0-255 down to 0-1
download =DOWNLOAD_MNIST
)
# first convert the tensors into a Dataset that torch can work with
torch_dataset = Data.TensorDataset(train_data.train_data[:BATCH_SIZE], train_data.train_labels[:BATCH_SIZE])
# wrap the dataset in a DataLoader
loader = Data.DataLoader(
    dataset=torch_dataset,      # torch TensorDataset format
    batch_size=BATCH_SIZE,      # mini batch size
    shuffle=False,              # keep the sample order fixed
    num_workers=2,              # number of worker processes for loading data
)
import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision
import matplotlib.pyplot as plt
K = 10
EPOCH = 50+25*K
BATCH_SIZE = 64
LR = 0.01
DOWNLOAD_MNIST = False
train_data = torchvision.datasets.MNIST(
root = './mnist',
train = True,
    transform = torchvision.transforms.ToTensor(), # scale pixel values from 0-255 down to 0-1
download =DOWNLOAD_MNIST
)
# first convert the tensors into a Dataset that torch can work with
torch_dataset = Data.TensorDataset(train_data.train_data[:BATCH_SIZE], train_data.train_labels[:BATCH_SIZE])
# wrap the dataset in a DataLoader
loader = Data.DataLoader(
    dataset=torch_dataset,      # torch TensorDataset format
    batch_size=BATCH_SIZE,      # mini batch size
    shuffle=False,              # keep the sample order fixed
    num_workers=2,              # number of worker processes for loading data
)
dsc = DSC(autoencoder,BATCH_SIZE)
dsc
optimizer = torch.optim.Adam([dsc.m],lr = LR,betas = (0.9,0.99),weight_decay=1e-5)
loss_func = torch.nn.MSELoss()
loss_list = []
lambda1 = 1
lambda2 = 10**(K/10-3)
steps = []
t = 0
for epoch in range(EPOCH): # iterate over the whole dataset EPOCH times
    for step, (batch_x, batch_y) in enumerate(loader): # the loader yields one mini-batch per step
t+=1
steps.append(t)
batch_x = (batch_x.view(-1,28*28).type(torch.FloatTensor)/255)
batch_y = batch_y
batch_y,index_ = batch_y.sort()
batch_x = batch_x[index_]
z,z_,x_,c = dsc(batch_x)
loss = 1/2*torch.norm(x_-batch_x)**2+lambda1*torch.norm(c)+\
lambda2/2*torch.norm(z-z_)**2
loss_list.append(loss)
        optimizer.zero_grad() # reset the gradients
        loss.backward()       # backpropagate to compute gradients
        optimizer.step()      # update the self-expressive parameters
import IPython
IPython.display.clear_output(wait=True)
plt.plot(steps[-30:],loss_list[-30:])
plt.ylabel('loss')
plt.xlabel('train step')
plt.show()
C = c.data.numpy()
plt.imshow(C,cmap = 'Greys')
plt.show()
def printdata(x,y):
plt.imshow(x,cmap = 'gray')
plt.title(y)
plt.show()
for i in range(3):
printdata(x_[i].view(28,28).cpu().data.numpy(),str(batch_y[i].cpu().numpy()))
printdata(batch_x[i].view(28,28).cpu().numpy(),str(batch_y[i].cpu().numpy()))
np.matmul(z.numpy(),C),z.data.numpy()
C
C = c.data.numpy()
plt.imshow(abs(C),cmap = 'Greys')
plt.show()
A = abs(C)+abs(C.T)
plt.imshow(A,cmap = 'Greys')
plt.show()
D = np.diag(A.sum(axis=1))
L = D - A
# compute the eigenvalues and eigenvectors of the Laplacian
vals, vecs = np.linalg.eig(L)
# sort eigenvectors by ascending eigenvalue
vecs = vecs[:,np.argsort(vals)]
vals = vals[np.argsort(vals)]
n = 2
# run kmeans on the n leading eigenvectors to form n clusters
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=n)
kmeans.fit(vecs[:,:n])
spectral_labels = kmeans.labels_
spectral_labels
plt.plot(range(len(vals)),vals)
import pandas as pd
a = pd.DataFrame(vals)
eigngap = a-a.shift().fillna(0)
plt.plot(range(len(eigngap)),eigngap)
plt.xlim(0,50)
plt.show()
z.size()
###Output
_____no_output_____ |
Files/2017-10-24-Sensors-for-Smart-Cities.ipynb | ###Markdown
**Smart Cities: the use of sensors to tackle the Urban Heat Island effect** *A Data Science for Smart Cities project carried out with Jupyter and Python* Data source: [Road Weather Information Stations, City of Seattle Open Data Portal.](https://data.seattle.gov/Transportation/Road-Weather-Information-Stations/egc4-d24i)
###Code
#Import libraries
import dask.dataframe as dd #High-performance data frame handler
import pandas as pd
import numpy as np
#Locate file and load it as data frame with proper data types
filename = 'Road_Weather_Information_Stations.csv'
df = dd.read_csv(filename,
dtype={'StationName': str, 'StationLocation': object,
'RecordId':int, 'RoadSurfaceTemperature': float, 'AirTemperature': float},
parse_dates=['DateTime'])
#Visualize first 5 rows
df.head(5)
#Visualize last 5 of 708,891 rows
df.tail(5)
#Visualize data types
df._meta.dtypes #object=string
###Output
_____no_output_____
###Markdown
Notice that Python offers powerful tools to interact with very large files, such as the dask package, but one significant constraint is the machine on which it operates. Machines with limited resources need long computation times, **slowing down the working pipeline**. For this reason, the City of Seattle Open Data Portal makes it possible to quickly query and slice the dataset online. This approach was followed in order to **preserve the computational resources**, an option that should always be considered whenever possible. The hottest month on average is August, so the online query covers the 01 August 2016 - 31 August 2016 period.
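For completeness, a hedged sketch of how the same August 2016 slice could be produced locally with dask instead of the online query; it assumes the `df` dask dataframe defined above and enough memory to hold the filtered result.
###Code
# filter the full dask dataframe to August 2016 and materialize only that slice
august_slice = df[(df['DateTime'] >= '2016-08-01') & (df['DateTime'] < '2016-09-01')].compute()
print(august_slice.shape)
###Output
_____no_output_____
###Markdown
The pre-sliced CSV downloaded from the portal is used below instead, which keeps the local memory footprint small.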
###Code
import pandas as pd
import datetime as DT
#Load the csv file for August 2016
filename='August2016_Road_Weather_Information_Stations.csv'
august=pd.read_csv(filename,
                   dtype={'StationName': str, 'StationLocation': object,
                          'RecordId':int, 'RoadSurfaceTemperature': float, 'AirTemperature': float},
                   parse_dates=['DateTime'])
#Visualize first 5 values
august.head(5)
#Visualize data types
august.dtypes #object=string
#Separate date from time
august['Date'] = [d.date() for d in august['DateTime']]
august['Time'] = [d.time() for d in august['DateTime']]
#Visualize
august.head(5)
#Drop DateTime Air and Temperature column
august=august.drop('DateTime', axis=1)
august=august.drop('AirTemperature', axis=1)
#Sort the dataframe
august.sort_values(by=['StationName'], inplace=True)
#Set the index to be this and don't drop
august.set_index(keys=['StationName'], drop=False,inplace=True)
#Get a list of stations
stations=august['StationName'].unique().tolist()
#Create split dataframes
alaskan = august.loc[august.StationName=='AlaskanWayViaduct_KingSt']
albro = august.loc[august.StationName=='AlbroPlaceAirportWay']
aurora = august.loc[august.StationName=='AuroraBridge']
harbor = august.loc[august.StationName=='HarborAveUpperNorthBridge']
joseriza = august.loc[august.StationName=='JoseRizalBridgeNorth']
magnolia = august.loc[august.StationName=='MagnoliaBridge']
myrtle = august.loc[august.StationName=='35thAveSW_SWMyrtleSt']
ne45 = august.loc[august.StationName=='NE45StViaduct']
roosevelt = august.loc[august.StationName=='RooseveltWay_NE80thSt']
spokane = august.loc[august.StationName=='SpokaneSwingBridge']
#Create list with data frame names for reference
station_list=[alaskan,albro,aurora,harbor,joseriza,magnolia,myrtle,ne45,roosevelt,spokane]
#Compute average temperature for each time interval and station (e.g., time series) for August 2016
#and plot the series
import matplotlib.pyplot as plt
#Create empty image
fig = plt.figure(figsize=(15,18))
plt.subplots_adjust(hspace=0.3)
#Empty lists to store peak times, station names, and max road surface temperatures
peak=[]
names=[]
#Loop through the station list
for i, station in enumerate(station_list):
mean_t=station.groupby('Time').RoadSurfaceTemperature.mean()
#Convert Fahrenheit degrees into Celsius degrees
mean_t=mean_t.apply(lambda x: ((x - 32) * 5/9))
#Save station names, peak times, and temperatures
peak.append(mean_t.idxmax().strftime("%H:%M:%S"))
names.append(str(station.StationName[1]))
#Add plot
ax = fig.add_subplot(5, 2, i+1)
plt.scatter(mean_t.index, mean_t)
plt.title(str(station.StationName[1]))
plt.xlabel("Time")
plt.ylabel("degrees Celsius")
plt.ylim(15,40)
plt.show()
#Show peak times
peak_df=pd.DataFrame({'Station Name' : names, 'Peak Time': peak})
peak_df=peak_df[['Station Name','Peak Time']]
peak_df
#Extract average road surface temperatures for the 3:50PM-4:10PM interval
#Empty dictionary to store station names and average peak road surface temperatures
peak_temp={}
#Loop through the station list
for i, station in enumerate(station_list):
mean_t=station.groupby('Time').RoadSurfaceTemperature.mean()
mean_t=mean_t.apply(lambda x: ((x - 32) * 5/9))
#Save the average temperature between 3:50PM and 4:10PM
peak_temp[str(station.StationName[1])]=round(mean_t[950:971].mean(),3)
#Visualize data frame
max_t=pd.DataFrame({'Station Name':peak_temp.keys(), 'Max Temperature': peak_temp.values()})
max_t=max_t[['Station Name', 'Max Temperature']]
max_t
###Output
_____no_output_____
###Markdown
Interactive plotting
###Code
#Sensor locations
sensors=pd.read_csv('sensors.csv')
#Add temperature column
complete=pd.merge(sensors, max_t, on='Station Name', how='outer')
complete
import numpy as np
import pandas as pd
import folium
import branca
import matplotlib.pyplot as plt
#Set coordinates
SEATTLE_COORDINATES = (47.59844, -122.33561)
#Empty map zoomed in on Seattle
map = folium.Map(location=SEATTLE_COORDINATES, zoom_start=11.48, tiles='Stamen Terrain')
#Create group of circle markers
f = folium.map.FeatureGroup()
lats=complete['Lat'].tolist()
lngs=complete['Lon'].tolist()
sizes=complete['Max Temperature'].tolist()
popup=complete['Station Name'].tolist()
#Colormap
colors=np.asarray(sizes)
cm = branca.colormap.LinearColormap(['green', 'yellow', 'red'], vmin=25, vmax=41)
cm.caption = 'Road Surface Temperature [degrees Celsius]'
map.add_child(cm)
#Add color-coded circles
for lat, lng, size, color in zip(lats, lngs, sizes, colors):
f.add_child(folium.features.CircleMarker(
[lat, lng],
radius=size,
color=None,
#popup=popup,
fill_color=cm(color)))
map.add_child(f)
#Add markers with popups
for each in complete[0:10].iterrows():
folium.Marker([each[1]['Lat'],each[1]['Lon']],
popup='Bridge: '+each[1]['Station Name']+'; Max Temp: '+str(each[1]['Max Temperature'])).add_to(map)
#Display
map
###Output
_____no_output_____ |
test/py_function_file_test.ipynb | ###Markdown
Variable Length Arguments
###Code
def vfunc(arg1, *args):
print('arg1: ', arg1)
    if args:  # an empty tuple is falsy, so this branch only runs when extra arguments were passed
print('>>> args:', args, ', type: tuple, ', end='')
for index, item in enumerate(args):
print('args[{}]:'.format(index), item, end=' ')
return
vfunc(666)
vfunc(33, 44, 55, 66, 77, 88)
###Output
arg1: 666
arg1: 33
>>> args: (44, 55, 66, 77, 88) , type: tuple, args[0]: 44 args[1]: 55 args[2]: 66 args[3]: 77 args[4]: 88
###Markdown
Anonymous / Lambda
###Code
from math import pi
perimeter = lambda r: r * pi * 2
perimeter(10)
###Output
_____no_output_____
###Markdown
Module dir()
###Code
import math
print(dir(math))
###Output
['__doc__', '__loader__', '__name__', '__package__', '__spec__', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'ceil', 'copysign', 'cos', 'cosh', 'degrees', 'e', 'erf', 'erfc', 'exp', 'expm1', 'fabs', 'factorial', 'floor', 'fmod', 'frexp', 'fsum', 'gamma', 'gcd', 'hypot', 'inf', 'isclose', 'isfinite', 'isinf', 'isnan', 'ldexp', 'lgamma', 'log', 'log10', 'log1p', 'log2', 'modf', 'nan', 'pi', 'pow', 'radians', 'remainder', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'tau', 'trunc']
###Markdown
Assertion and Exception
###Code
try :
p = int(input('input a number:'))
assert (p>=0), 'A negative number {} assertion.'.format(p)
print('The number you input: ', p)
except ValueError as e:
print(e)
except AssertionError as e:
print(e)
###Output
input a number:340
The number you input: 340
###Markdown
File access
###Code
# Add timestamp in file.
import datetime
try:
ft = open('sample.txt', 'r+')
ft.seek(0,2)
ft.write('\nNow is {}.'.format(datetime.datetime.now()))
ft.seek(0,0)
chunk = ft.read()
print('file size: {} bytes'.format(ft.tell()))
ft.close()
except IOError:
print('>>> Sorry, file access failed.')
else:
print(chunk)
print('>>> Congrats, file access succeed.')
# Initialize file data.
import datetime
try:
ft = open('sample.txt', 'w')
ft.write('Greets from "sample.txt".')
print('file size: {} bytes'.format(ft.tell()))
ft.close()
except IOError:
print('>>> Sorry, file Initialize failed.')
else:
print('>>> Congrats, file Initialize succeed.')
###Output
file size: 25 bytes
>>> Congrats, file Initialize succeed.
###Markdown
OS Module getcwd()
###Code
import os
print('Current work directory is ', os.getcwd())
###Output
Current work directory is /home/vincent/pythonws/jnb-sample/test
|
Sample app/MQA_app_original.ipynb | ###Markdown
Mortgage Qualifier: Loan Qualifier Application. This is a command-line application to match applicants with mortgage loans. Example: `$ mqa.py`
###Code
#import dependencies
import sys
import fire
import questionary
from pathlib import Path
from qualifier.utils.fileio import (
load_csv,
save_csv,
)
#import calculators
from qualifier.utils.calculators import (
calculate_monthly_debt_ratio,
calculate_loan_to_value_ratio,
)
#import qualifiers
from qualifier.filters.max_loan_size import filter_max_loan_size
from qualifier.filters.credit_score import filter_credit_score
from qualifier.filters.debt_to_income import filter_debt_to_income
from qualifier.filters.loan_to_value import filter_loan_to_value
#load bank data
def load_bank_data():
"""Ask for the file path to the latest banking data and load the CSV file.
Returns:
The bank data from the data rate sheet CSV file.
"""
csvpath = "./data/daily_rate_sheet.csv"
csvpath = Path(csvpath)
if not csvpath.exists():
sys.exit(f"Oops! Can't find this path: {csvpath}")
return load_csv(csvpath)
#load applicant info
def get_applicant_info():
"""Prompt dialog to get the applicant's financial information.
Returns:
Returns the applicant's financial information.
"""
credit_score = questionary.text("What's your credit score?").ask()
debt = questionary.text("What's your current amount of monthly debt?").ask()
income = questionary.text("What's your total monthly income?").ask()
loan_amount = questionary.text("What's your desired loan amount?").ask()
home_value = questionary.text("What's your home value?").ask()
credit_score = int(credit_score)
debt = float(debt)
income = float(income)
loan_amount = float(loan_amount)
home_value = float(home_value)
return credit_score, debt, income, loan_amount, home_value
#find qualifying loans
def find_qualifying_loans(bank_data, credit_score, debt, income, loan, home_value):
"""Determine which loans the user qualifies for.
Loan qualification criteria is based on:
- Credit Score
- Loan Size
- Debit to Income ratio (calculated)
- Loan to Value ratio (calculated)
Args:
bank_data (list): A list of bank data.
credit_score (int): The applicant's current credit score.
debt (float): The applicant's total monthly debt payments.
income (float): The applicant's total monthly income.
loan (float): The total loan amount applied for.
home_value (float): The estimated home value.
Returns:
A list of the banks willing to underwrite the loan.
"""
# Calculate the monthly debt ratio
monthly_debt_ratio = calculate_monthly_debt_ratio(debt, income)
print(f"The monthly debt to income ratio is {monthly_debt_ratio:.02f}")
# Calculate loan to value ratio
loan_to_value_ratio = calculate_loan_to_value_ratio(loan, home_value)
print(f"The loan to value ratio is {loan_to_value_ratio:.02f}.")
# Run qualification filters
bank_data_filtered = filter_max_loan_size(loan, bank_data)
bank_data_filtered = filter_credit_score(credit_score, bank_data_filtered)
bank_data_filtered = filter_debt_to_income(monthly_debt_ratio, bank_data_filtered)
bank_data_filtered = filter_loan_to_value(loan_to_value_ratio, bank_data_filtered)
print(f"Found {len(bank_data_filtered)} qualifying loans")
return bank_data_filtered
#save Qualifying loans
def save_qualifying_loans(qualifying_loans):
"""Saves the qualifying loans to a CSV file.
Args:
qualifying_loans (list of lists): The qualifying bank loans.
"""
# @TODO: Complete the usability dialog for savings the CSV Files.
# YOUR CODE HERE!
if len(qualifying_loans) > 0:
#Would you like to save?
save = questionary.confirm("Would you like to save?").ask()
if save == True:
#csvpath = Path('qualifying_loans.csv')
csvpath = questionary.text("Where would you like to save?").ask()
save_csv(csvpath, qualifying_loans)
print('writing file...')
if save == False:
sys.exit(f"You chose note to save")
return save
elif len(qualifying_loans) == 0:
sys.exit("Sorry there are no qualifying loans")
#run main function
def run():
"""The main function for running the script."""
# Load the latest Bank data
bank_data = load_bank_data()
# Get the applicant's information
credit_score, debt, income, loan_amount, home_value = get_applicant_info()
# Find qualifying loans
qualifying_loans = find_qualifying_loans(
bank_data, credit_score, debt, income, loan_amount, home_value
)
# Save qualifying loans
save_qualifying_loans(qualifying_loans)
if __name__ == "__main__":
fire.Fire(run)
###Output
_____no_output_____ |
Code/Assignment-9/Independent Analysis.ipynb | ###Markdown
Independent Analysis - Srinivas (handle: thewickedaxe)** PLEASE SCROLL TO THE BOTTOM OF THE NOTEBOOK TO FIND THE QUESTIONS AND THEIR ANSWERS** ** In this notebook we explore dimensionality reduction with ISOMAP and MDS and their effects on classification** Initial Data Cleaning
###Code
# Standard
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
# Dimensionality reduction and Clustering
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn import manifold, datasets
from itertools import cycle
# Plotting tools and classifiers
from matplotlib.colors import ListedColormap
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
from sklearn import cross_validation
from sklearn.cross_validation import LeaveOneOut
from sklearn.cross_validation import LeavePOut
# Let's read the data in and clean it
def get_NaNs(df):
columns = list(df.columns.get_values())
row_metrics = df.isnull().sum(axis=1)
rows_with_na = []
for i, x in enumerate(row_metrics):
if x > 0: rows_with_na.append(i)
return rows_with_na
def remove_NaNs(df):
rows_with_na = get_NaNs(df)
cleansed_df = df.drop(df.index[rows_with_na], inplace=False)
return cleansed_df
initial_data = pd.DataFrame.from_csv('Data_Adults_1_reduced.csv')
cleansed_df = remove_NaNs(initial_data)
# Let's also get rid of nominal data
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
X = cleansed_df.select_dtypes(include=numerics)
print X.shape
# Let's now clean columns getting rid of certain columns that might not be important to our analysis
cols2drop = ['GROUP_ID', 'doa', 'Baseline_header_id', 'Concentration_header_id',
'Baseline_Reading_id', 'Concentration_Reading_id']
X = X.drop(cols2drop, axis=1, inplace=False)
print X.shape
# For our studies children skew the data, it would be cleaner to just analyse adults
X = X.loc[X['Age'] >= 18]
print X.shape
###Output
(3926, 322)
(3881, 322)
###Markdown
we've now dropped the last of the inexplicable discrete numerical data and removed children from the mix Extracting the samples we are interested in
###Code
# Let's extract ADHd and Bipolar patients (mutually exclusive)
ADHD = X.loc[X['ADHD'] == 1]
ADHD = ADHD.loc[ADHD['Bipolar'] == 0]
BP = X.loc[X['Bipolar'] == 1]
BP = BP.loc[BP['ADHD'] == 0]
print ADHD.shape
print BP.shape
# Keeping a backup of the data frame object because numpy arrays don't play well with certain scikit functions
ADHD_df = ADHD.copy(deep = True)
BP_df = BP.copy(deep = True)
ADHD = pd.DataFrame(ADHD.drop(['Patient_ID'], axis = 1, inplace = False))
BP = pd.DataFrame(BP.drop(['Patient_ID'], axis = 1, inplace = False))
###Output
(1383, 322)
(440, 322)
###Markdown
we see here that there are 1383 people who have ADHD but are not Bipolar and 440 people who are Bipolar but do not have ADHD Dimensionality reduction PCA
###Code
combined = pd.concat([ADHD, BP])
combined_backup = pd.concat([ADHD, BP])
pca = PCA(n_components = 24, whiten = "True").fit(combined)
combined = pca.transform(combined)
print sum(pca.explained_variance_ratio_)
combined = pd.DataFrame(combined)
ADHD_reduced_df = combined[:1383]
BP_reduced_df = combined[1383:]
ADHD_reduced_df_id = ADHD_reduced_df.copy(deep = True)
BP_reduced_df_id = BP_reduced_df.copy(deep = True)
ADHD_reduced_df_id['Patient_ID'] = 123
BP_reduced_df_id['Patient_ID'] = 123
print ADHD_reduced_df.shape
print BP_reduced_df.shape
print ADHD_reduced_df_id.shape
print BP_reduced_df_id.shape
# resorting to some hacky crap, that I am ashamed to write, but pandas is refusing to cooperate
z = []
for x in BP_df['Patient_ID']:
z.append(x)
BP_reduced_df_id['Patient_ID'] = z
z = []
for x in ADHD_df['Patient_ID']:
z.append(x)
ADHD_reduced_df_id['Patient_ID'] = z
ADHD_pca = ADHD_reduced_df.copy(deep = True)
BP_pca = BP_reduced_df.copy(deep = True)
###Output
0.94670018985
(1383, 24)
(440, 24)
(1383, 25)
(440, 25)
###Markdown
We see here that most of the variance is preserved with just 24 features. Manifold Techniques ISOMAP
###Code
combined = manifold.Isomap(20, 20).fit_transform(combined_backup)
ADHD_iso = combined[:1383]
BP_iso = combined[1383:]
print pd.DataFrame(ADHD_iso).head()
###Output
0 1 2 3 4 5 \
0 1902.039550 -585.139359 -218.456990 -284.670196 -270.290695 800.963832
1 -1460.620572 760.904059 -145.632148 -316.888910 73.448451 -496.774712
2 -979.903617 -244.839809 287.919386 -809.002862 -93.291661 31.009373
3 2380.896428 -136.396847 1038.059415 94.820461 -8.510901 93.103319
4 3785.806785 -743.303358 -55.820741 532.036809 120.361002 -103.937491
6 7 8 9 10 11 \
0 -131.229362 -58.954203 -488.117735 290.063022 -233.997831 -18.622278
1 -360.861443 -35.114610 4.569729 182.670645 109.859489 43.898248
2 -322.128221 135.822512 473.378554 302.339227 -101.557987 -486.187360
3 58.787224 44.339368 288.570020 145.449473 273.676975 330.433262
4 -328.883959 -308.048198 -176.178076 206.023323 93.457154 99.003199
12 13 14 15 16 17 \
0 -56.004988 -32.666850 -246.312982 -118.228148 84.463058 225.965210
1 489.537799 -117.460890 -233.599066 -434.049786 350.592390 219.675481
2 472.590006 -287.829714 259.600410 -187.051557 -397.879062 -278.972686
3 346.786453 484.991693 222.228941 -462.668567 140.125330 170.317520
4 7.753871 -216.860875 11.532852 -425.298647 77.843013 -314.525649
18 19
0 -66.060321 -139.221523
1 216.895233 -52.940637
2 43.510626 56.987645
3 226.482586 -346.429327
4 -83.576841 391.517919
###Markdown
Multi dimensional scaling
###Code
mds = manifold.MDS(20).fit_transform(combined_backup)
ADHD_mds = mds[:1383]
BP_mds = mds[1383:]
print pd.DataFrame(ADHD_mds).head()
###Output
0 1 2 3 4 5 \
0 1902.039550 -585.139359 -218.456990 -284.670196 -270.290695 800.963832
1 -1460.620572 760.904059 -145.632148 -316.888910 73.448451 -496.774712
2 -979.903617 -244.839809 287.919386 -809.002862 -93.291661 31.009373
3 2380.896428 -136.396847 1038.059415 94.820461 -8.510901 93.103319
4 3785.806785 -743.303358 -55.820741 532.036809 120.361002 -103.937491
6 7 8 9 10 11 \
0 -131.229362 -58.954203 -488.117735 290.063022 -233.997831 -18.622278
1 -360.861443 -35.114610 4.569729 182.670645 109.859489 43.898248
2 -322.128221 135.822512 473.378554 302.339227 -101.557987 -486.187360
3 58.787224 44.339368 288.570020 145.449473 273.676975 330.433262
4 -328.883959 -308.048198 -176.178076 206.023323 93.457154 99.003199
12 13 14 15 16 17 \
0 -56.004988 -32.666850 -246.312982 -118.228148 84.463058 225.965210
1 489.537799 -117.460890 -233.599066 -434.049786 350.592390 219.675481
2 472.590006 -287.829714 259.600410 -187.051557 -397.879062 -278.972686
3 346.786453 484.991693 222.228941 -462.668567 140.125330 170.317520
4 7.753871 -216.860875 11.532852 -425.298647 77.843013 -314.525649
18 19
0 -66.060321 -139.221523
1 216.895233 -52.940637
2 43.510626 56.987645
3 226.482586 -346.429327
4 -83.576841 391.517919
###Markdown
As is evident above, the 2 manifold techniques don't really offer very different dimensionality reductions. Therefore we are just going to roll with Multi dimensional scaling Clustering and other grouping experiments Mean-Shift - mds
###Code
ADHD_clust = pd.DataFrame(ADHD_mds)
BP_clust = pd.DataFrame(BP_mds)
# This is a consequence of how we dropped columns, I apologize for the hacky code
data = pd.concat([ADHD_clust, BP_clust])
# Let's see what happens with Mean Shift clustering
bandwidth = estimate_bandwidth(data.get_values(), quantile=0.2, n_samples=1823) * 0.8
ms = MeanShift(bandwidth=bandwidth)
ms.fit(data.get_values())
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print('Estimated number of clusters: %d' % n_clusters_)
for cluster in range(n_clusters_):
ds = data.get_values()[np.where(labels == cluster)]
plt.plot(ds[:,0], ds[:,1], '.')
lines = plt.plot(cluster_centers[cluster, 0], cluster_centers[cluster, 1], 'o')
###Output
_____no_output_____
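Before moving on, here is an optional sketch of how the sensitivity to the bandwidth estimate (discussed below) could be probed by sweeping the `quantile` argument of `estimate_bandwidth`; it assumes the `data` frame from the cell above.

```python
# Sketch: how the number of Mean-Shift clusters reacts to the bandwidth quantile.
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth

for q in [0.1, 0.2, 0.3, 0.4]:
    bw = estimate_bandwidth(data.get_values(), quantile=q, n_samples=1823)
    ms_q = MeanShift(bandwidth=bw).fit(data.get_values())
    print('quantile %.1f -> %d clusters' % (q, len(np.unique(ms_q.labels_))))
```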
###Markdown
Though I'm not sure how best to tweak the hyper-parameters of the bandwidth estimation function, the clustering doesn't seem robust: minute variations to the bandwidth result in large differences in the clusters. Perhaps the data isn't very suitable for a contrived clustering technique like Mean-Shift. Therefore let us attempt something more naive and simplistic like K-Means. K-Means clustering - mds
###Code
kmeans = KMeans(n_clusters=2)
kmeans.fit(data.get_values())
labels = kmeans.labels_
centroids = kmeans.cluster_centers_
print('Estimated number of clusters: %d' % len(centroids))
print data.shape
for label in [0, 1]:
ds = data.get_values()[np.where(labels == label)]
plt.plot(ds[:,0], ds[:,1], '.')
lines = plt.plot(centroids[label,0], centroids[label,1], 'o')
###Output
_____no_output_____
###Markdown
As is evident from the above 2 experiments, no clear clustering is apparent. But there is some significant overlap and there are 2 clear groups. Classification Experiments Let's experiment with a bunch of classifiers
###Code
ADHD_mds = pd.DataFrame(ADHD_mds)
BP_mds = pd.DataFrame(BP_mds)
BP_mds['ADHD-Bipolar'] = 0
ADHD_mds['ADHD-Bipolar'] = 1
data = pd.concat([ADHD_mds, BP_mds])
class_labels = data['ADHD-Bipolar']
data = data.drop(['ADHD-Bipolar'], axis = 1, inplace = False)
print data.shape
data = data.get_values()
# Leave one Out cross validation
def leave_one_out(classifier, values, labels):
leave_one_out_validator = LeaveOneOut(len(values))
classifier_metrics = cross_validation.cross_val_score(classifier, values, labels, cv=leave_one_out_validator)
accuracy = classifier_metrics.mean()
deviation = classifier_metrics.std()
return accuracy, deviation
p_val = 100
knn = KNeighborsClassifier(n_neighbors = 5)
svc = SVC(gamma = 2, C = 1)
rf = RandomForestClassifier(n_estimators = 22)
dt = DecisionTreeClassifier(max_depth = 22)
qda = QDA()
gnb = GaussianNB()
classifier_accuracy_list = []
classifiers = [(knn, "KNN"), (svc, "SVM"), (rf, "Random Forest"), (dt, "Decision Tree"),
(qda, "QDA"), (gnb, "Gaussian NB")]
for classifier, name in classifiers:
accuracy, deviation = leave_one_out(classifier, data, class_labels)
print '%s accuracy is %0.4f (+/- %0.3f)' % (name, accuracy, deviation)
classifier_accuracy_list.append((name, accuracy))
###Output
KNN accuracy is 0.7071 (+/- 0.455)
SVM accuracy is 0.7586 (+/- 0.428)
Random Forest accuracy is 0.7301 (+/- 0.444)
Decision Tree accuracy is 0.6643 (+/- 0.472)
QDA accuracy is 0.6961 (+/- 0.460)
Gaussian NB accuracy is 0.7345 (+/- 0.442)
|
fabric_examples/basic_examples/create_network_l2sts.ipynb | ###Markdown
This notebook shows how to use Orchestrator APIs for user experiments
###Code
import os
from fabrictestbed.slice_manager import SliceManager, Status, SliceState
import json
ssh_key_file_priv=os.environ['HOME']+"/.ssh/id_rsa"
ssh_key_file_pub=os.environ['HOME']+"/.ssh/id_rsa.pub"
ssh_key_pub = None
with open (ssh_key_file_pub, "r") as myfile:
ssh_key_pub=myfile.read()
ssh_key_pub=ssh_key_pub.strip()
credmgr_host = os.environ['FABRIC_CREDMGR_HOST']
print(f"FABRIC Credential Manager : {credmgr_host}")
orchestrator_host = os.environ['FABRIC_ORCHESTRATOR_HOST']
print(f"FABRIC Orchestrator : {orchestrator_host}")
###Output
_____no_output_____
###Markdown
Create Slice Manager ObjectUsers can request tokens with different Project and Scopes by altering `project_name` and `scope` parameters in the refresh call below.
###Code
slice_manager = SliceManager(oc_host=orchestrator_host,
cm_host=credmgr_host ,
project_name='all',
scope='all')
# Initialize the slice manager
slice_manager.initialize()
###Output
_____no_output_____
###Markdown
Orchestrator API example to query for available resources
###Code
status, advertised_topology = slice_manager.resources()
print(f"Status: {status}")
if status == Status.OK:
print(f"Topology: {advertised_topology}")
else:
print(f"Error: {advertised_topology}")
if status == Status.OK:
advertised_topology.draw()
###Output
_____no_output_____
###Markdown
Create Slice In Release 1.0, the user is expected to create a tagged interface and assign the IP addresses manually. Please use the example commands indicated below: Configure Slice Parameters
###Code
slice_name = 'MySlice'
site1 = 'RENC'
site2 = 'LBNL'
node1_name = 'Node1'
node2_name = 'Node2'
node3_name = 'Node3'
network_service_name='site2site1'
nic1_name = 'node1-nic1'
nic2_name = 'node2-nic1'
nic3_name = 'node3-nic1'
image = 'default_centos_8'
image_type = 'qcow2'
cores = 2
ram = 16
disk = 100
from fabrictestbed.slice_editor import ExperimentTopology, Capacities, ComponentType, ComponentModelType, ServiceType
# Create topology
t = ExperimentTopology()
# Add node
n1 = t.add_node(name=node1_name, site=site1)
# Set capacities
cap = Capacities()
cap.set_fields(core=cores, ram=ram, disk=disk)
# Set Properties
n1.set_properties(capacities=cap, image_type=image_type, image_ref=image)
# Add node
n2 = t.add_node(name=node2_name, site=site1)
# Set properties
n2.set_properties(capacities=cap, image_type=image_type, image_ref=image)
# Add node
n3 = t.add_node(name=node3_name, site=site2)
# Set properties
n3.set_properties(capacities=cap, image_type=image_type, image_ref=image)
# Shared Cards
n1.add_component(model_type=ComponentModelType.SmartNIC_ConnectX_6, name=nic1_name)
n2.add_component(model_type=ComponentModelType.SmartNIC_ConnectX_6, name=nic2_name)
n3.add_component(model_type=ComponentModelType.SmartNIC_ConnectX_6, name=nic3_name)
# L2STS Service
t.add_network_service(name='sts1', nstype=ServiceType.L2STS,
interfaces=[n1.interface_list[0], n2.interface_list[0], n3.interface_list[0]])
# Generate Slice Graph
slice_graph = t.serialize()
# Request slice from Orchestrator
return_status, slice_reservations = slice_manager.create(slice_name=slice_name,
slice_graph=slice_graph,
ssh_key=ssh_key_pub)
if return_status == Status.OK:
slice_id = slice_reservations[0].get_slice_id()
print("Submitted slice creation request. Slice ID: {}".format(slice_id))
else:
print(f"Failure: {slice_reservations}")
###Output
_____no_output_____
###Markdown
Get the Slice
###Code
import time
def wait_for_slice(slice,timeout=180,interval=10,progress=False):
timeout_start = time.time()
if progress: print("Waiting for slice .", end = '')
while time.time() < timeout_start + timeout:
return_status, slices = slice_manager.slices(excludes=[SliceState.Dead,SliceState.Closing])
if return_status == Status.OK:
slice = list(filter(lambda x: x.slice_name == slice_name, slices))[0]
if slice.slice_state == "StableOK":
if progress: print(" Slice state: {}".format(slice.slice_state))
return slice
if slice.slice_state == "Closing" or slice.slice_state == "Dead":
if progress: print(" Slice state: {}".format(slice.slice_state))
return slice
else:
print(f"Failure: {slices}")
if progress: print(".", end = '')
time.sleep(interval)
if time.time() >= timeout_start + timeout:
if progress: print(" Timeout exceeded ({} sec). Slice: {} ({})".format(timeout,slice.slice_name,slice.slice_state))
return slice
return_status, slices = slice_manager.slices(excludes=[SliceState.Dead,SliceState.Closing])
if return_status == Status.OK:
slice = list(filter(lambda x: x.slice_name == slice_name, slices))[0]
slice = wait_for_slice(slice, progress=True)
print()
print("Slice Name : {}".format(slice.slice_name))
print("ID : {}".format(slice.slice_id))
print("State : {}".format(slice.slice_state))
print("Lease End : {}".format(slice.lease_end))
###Output
_____no_output_____
###Markdown
Get the NodesRetrieve the node information and save the management IP address. Get the Topology
###Code
return_status, experiment_topology = slice_manager.get_slice_topology(slice_object=slice)
###Output
_____no_output_____
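As an optional convenience (a sketch, assuming `nodes` behaves like a dictionary of node name to node object, as its usage below suggests), the management IPs of all nodes can be listed in one pass instead of cell by cell:

```python
# Sketch: print every node's management IP using the same get_property call as below.
for name, node in experiment_topology.nodes.items():
    print("{:10s} -> {}".format(name, node.get_property(pname='management_ip')))
```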
###Markdown
Configure Node1 Use ssh to configure eth1 on node 1. ```ip addr add 192.168.10.51/24 dev eth1```
###Code
node1 = experiment_topology.nodes[node1_name]
management_ip_node1 = str(node1.get_property(pname='management_ip'))
print("Node Name : {}".format(node1.name))
print("Management IP : {}".format(management_ip_node1))
print()
import paramiko
key = paramiko.RSAKey.from_private_key_file(ssh_key_file_priv)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy())
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(management_ip_node1,username='centos',pkey = key)
stdin, stdout, stderr = client.exec_command('sudo ip addr add 192.168.10.51/24 dev eth1')
stdin, stdout, stderr = client.exec_command('ifconfig eth1')
print (str(stdout.read(),'utf-8').replace('\\n','\n'))
###Output
_____no_output_____
###Markdown
Configure Node2 Use ssh to configure eth1 on Node 2. ```ip addr add 192.168.10.52/24 dev eth1```
###Code
node2 = experiment_topology.nodes[node2_name]
management_ip_node2 = str(node2.get_property(pname='management_ip'))
print("Node Name : {}".format(node2.name))
print("Management IP : {}".format(management_ip_node2))
print()
import paramiko
key = paramiko.RSAKey.from_private_key_file(ssh_key_file_priv)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy())
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(management_ip_node2,username='centos',pkey = key)
stdin, stdout, stderr = client.exec_command('sudo ip addr add 192.168.10.52/24 dev eth1')
stdin, stdout, stderr = client.exec_command('ifconfig eth1')
print (str(stdout.read(),'utf-8').replace('\\n','\n'))
###Output
_____no_output_____
###Markdown
Configure Node3 Use ssh to configure eth1 on node 3. ```ip addr add 192.168.10.53/24 dev eth1```
###Code
node3 = experiment_topology.nodes[node3_name]
management_ip_node3 = str(node3.get_property(pname='management_ip'))
print("Node Name : {}".format(node3.name))
print("Management IP : {}".format(management_ip_node3))
print()
import paramiko
key = paramiko.RSAKey.from_private_key_file(ssh_key_file_priv)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy())
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(management_ip_node3,username='centos',pkey = key)
stdin, stdout, stderr = client.exec_command('sudo ip addr add 192.168.10.53/24 dev eth1')
stdin, stdout, stderr = client.exec_command('ifconfig eth1')
print (str(stdout.read(),'utf-8').replace('\\n','\n'))
###Output
_____no_output_____
###Markdown
Delete Slice
###Code
return_status, result = slice_manager.delete(slice_object=slice)
print("Response Status {}".format(return_status))
print("Response received {}".format(result))
###Output
_____no_output_____
###Markdown
This notebook shows how to use Orchestrator APIs for user experiments
###Code
import os
from fabrictestbed.slice_manager import SliceManager, Status, SliceState
import json
bastion_public_addr = 'bastion-1.fabric-testbed.net'
bastion_private_ipv4_addr = '192.168.11.226'
bastion_private_ipv6_addr = '2600:2701:5000:a902::c'
bastion_username = '<your bastion username>'
bastion_key_filename = os.environ['HOME'] + "/.ssh/id_rsa_fabric"
ssh_key_file_priv=os.environ['HOME']+"/.ssh/id_rsa"
ssh_key_file_pub=os.environ['HOME']+"/.ssh/id_rsa.pub"
ssh_key_pub = None
with open (ssh_key_file_pub, "r") as myfile:
ssh_key_pub=myfile.read()
ssh_key_pub=ssh_key_pub.strip()
credmgr_host = os.environ['FABRIC_CREDMGR_HOST']
print(f"FABRIC Credential Manager : {credmgr_host}")
orchestrator_host = os.environ['FABRIC_ORCHESTRATOR_HOST']
print(f"FABRIC Orchestrator : {orchestrator_host}")
###Output
_____no_output_____
###Markdown
Create Slice Manager ObjectUsers can request tokens with different Project and Scopes by altering `project_name` and `scope` parameters in the refresh call below.
###Code
slice_manager = SliceManager(oc_host=orchestrator_host,
cm_host=credmgr_host ,
project_name='all',
scope='all')
# Initialize the slice manager
slice_manager.initialize()
###Output
_____no_output_____
###Markdown
Orchestrator API example to query for available resources
###Code
status, advertised_topology = slice_manager.resources()
print(f"Status: {status}")
if status == Status.OK:
print(f"Topology: {advertised_topology}")
else:
print(f"Error: {advertised_topology}")
if status == Status.OK:
advertised_topology.draw()
###Output
_____no_output_____
###Markdown
Create Slice In Release 1.0, the user is expected to create a tagged interface and assign the IP addresses manually. Please use the example commands indicated below: Configure Slice Parameters
###Code
slice_name = 'MySlice'
site1 = 'MAX'
site2 = 'STAR'
node1_name = 'Node1'
node2_name = 'Node2'
node3_name = 'Node3'
network_service_name='site2site1'
nic1_name = 'node1-nic1'
nic2_name = 'node2-nic1'
nic3_name = 'node3-nic1'
username='centos'
image = 'default_centos_8'
image_type = 'qcow2'
cores = 2
ram = 8
disk = 100
from fabrictestbed.slice_editor import ExperimentTopology, Capacities, ComponentType, ComponentModelType, ServiceType
# Create topology
t = ExperimentTopology()
# Add node
n1 = t.add_node(name=node1_name, site=site1)
# Set capacities
cap = Capacities()
cap.set_fields(core=cores, ram=ram, disk=disk)
# Set Properties
n1.set_properties(capacities=cap, image_type=image_type, image_ref=image)
# Add node
n2 = t.add_node(name=node2_name, site=site1)
# Set properties
n2.set_properties(capacities=cap, image_type=image_type, image_ref=image)
# Add node
n3 = t.add_node(name=node3_name, site=site2)
# Set properties
n3.set_properties(capacities=cap, image_type=image_type, image_ref=image)
# Shared Cards
n1.add_component(model_type=ComponentModelType.SharedNIC_ConnectX_6, name=nic1_name)
n2.add_component(model_type=ComponentModelType.SharedNIC_ConnectX_6, name=nic2_name)
n3.add_component(model_type=ComponentModelType.SharedNIC_ConnectX_6, name=nic3_name)
# L2STS Service
t.add_network_service(name='sts1', nstype=ServiceType.L2STS,
interfaces=[n1.interface_list[0], n2.interface_list[0], n3.interface_list[0]])
# Generate Slice Graph
slice_graph = t.serialize()
# Request slice from Orchestrator
return_status, slice_reservations = slice_manager.create(slice_name=slice_name,
slice_graph=slice_graph,
ssh_key=ssh_key_pub)
if return_status == Status.OK:
slice_id = slice_reservations[0].get_slice_id()
print("Submitted slice creation request. Slice ID: {}".format(slice_id))
else:
print(f"Failure: {slice_reservations}")
###Output
_____no_output_____
###Markdown
Get the Slice
###Code
import time
def wait_for_slice(slice,timeout=180,interval=10,progress=False):
timeout_start = time.time()
if progress: print("Waiting for slice .", end = '')
while time.time() < timeout_start + timeout:
return_status, slices = slice_manager.slices(excludes=[SliceState.Dead,SliceState.Closing])
if return_status == Status.OK:
slice = list(filter(lambda x: x.slice_name == slice_name, slices))[0]
if slice.slice_state == "StableOK":
if progress: print(" Slice state: {}".format(slice.slice_state))
return slice
if slice.slice_state == "Closing" or slice.slice_state == "Dead":
if progress: print(" Slice state: {}".format(slice.slice_state))
return slice
else:
print(f"Failure: {slices}")
if progress: print(".", end = '')
time.sleep(interval)
if time.time() >= timeout_start + timeout:
if progress: print(" Timeout exceeded ({} sec). Slice: {} ({})".format(timeout,slice.slice_name,slice.slice_state))
return slice
return_status, slices = slice_manager.slices(excludes=[SliceState.Dead,SliceState.Closing])
if return_status == Status.OK:
slice = list(filter(lambda x: x.slice_name == slice_name, slices))[0]
slice = wait_for_slice(slice, progress=True)
print()
print("Slice Name : {}".format(slice.slice_name))
print("ID : {}".format(slice.slice_id))
print("State : {}".format(slice.slice_state))
print("Lease End : {}".format(slice.lease_end))
###Output
_____no_output_____
###Markdown
Get the NodesRetrieve the node information and save the management IP address. Get the Topology
###Code
return_status, experiment_topology = slice_manager.get_slice_topology(slice_object=slice)
###Output
_____no_output_____
###Markdown
Configure Node1 Use ssh to configure eth1 on node 1. ```ip addr add 192.168.10.51/24 dev eth1```
###Code
node1 = experiment_topology.nodes[node1_name]
management_ip_node1 = str(node1.get_property(pname='management_ip'))
print("Node Name : {}".format(node1.name))
print("Management IP : {}".format(management_ip_node1))
print()
from ipaddress import ip_address, IPv4Address
def validIPAddress(IP: str) -> str:
try:
return "IPv4" if type(ip_address(IP)) is IPv4Address else "IPv6"
except ValueError:
return "Invalid"
import paramiko
management_ip = management_ip_node1
key = paramiko.RSAKey.from_private_key_file(ssh_key_file_priv)
bastion=paramiko.SSHClient()
bastion.set_missing_host_key_policy(paramiko.AutoAddPolicy())
bastion.connect(bastion_public_addr, username=bastion_username, key_filename=bastion_key_filename)
bastion_transport = bastion.get_transport()
if validIPAddress(management_ip) == 'IPv4':
src_addr = (bastion_private_ipv4_addr, 22)
elif validIPAddress(management_ip) == 'IPv6':
src_addr = (bastion_private_ipv6_addr, 22)
else:
print('Management IP Invalid: {}'.format(management_ip))
dest_addr = (management_ip, 22)
bastion_channel = bastion_transport.open_channel("direct-tcpip", dest_addr, src_addr)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy())
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(management_ip,username=username,pkey = key, sock=bastion_channel)
stdin, stdout, stderr = client.exec_command('sudo ip addr add 192.168.10.51/24 dev eth1')
stdin, stdout, stderr = client.exec_command('ifconfig eth1')
print (str(stdout.read(),'utf-8').replace('\\n','\n'))
###Output
_____no_output_____
###Markdown
Configure Node2 Use ssh to configure eth1 on Node 2. ```ip addr add 192.168.10.52/24 dev eth1```
###Code
node2 = experiment_topology.nodes[node2_name]
management_ip_node2 = str(node2.get_property(pname='management_ip'))
print("Node Name : {}".format(node2.name))
print("Management IP : {}".format(management_ip_node2))
print()
from ipaddress import ip_address, IPv4Address
def validIPAddress(IP: str) -> str:
try:
return "IPv4" if type(ip_address(IP)) is IPv4Address else "IPv6"
except ValueError:
return "Invalid"
import paramiko
management_ip = management_ip_node2
key = paramiko.RSAKey.from_private_key_file(ssh_key_file_priv)
bastion=paramiko.SSHClient()
bastion.set_missing_host_key_policy(paramiko.AutoAddPolicy())
bastion.connect(bastion_public_addr, username=bastion_username, key_filename=bastion_key_filename)
bastion_transport = bastion.get_transport()
if validIPAddress(management_ip) == 'IPv4':
src_addr = (bastion_private_ipv4_addr, 22)
elif validIPAddress(management_ip) == 'IPv6':
src_addr = (bastion_private_ipv6_addr, 22)
else:
print('Management IP Invalid: {}'.format(management_ip))
dest_addr = (management_ip, 22)
bastion_channel = bastion_transport.open_channel("direct-tcpip", dest_addr, src_addr)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy())
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(management_ip,username=username,pkey = key, sock=bastion_channel)
stdin, stdout, stderr = client.exec_command('sudo ip addr add 192.168.10.52/24 dev eth1')
stdin, stdout, stderr = client.exec_command('ifconfig eth1')
print (str(stdout.read(),'utf-8').replace('\\n','\n'))
###Output
_____no_output_____
###Markdown
Configure Node3 Use ssh to configure eth1 on node 3. ```ip addr add 192.168.10.53/24 dev eth1```
###Code
node3 = experiment_topology.nodes[node3_name]
management_ip_node3 = str(node3.get_property(pname='management_ip'))
print("Node Name : {}".format(node3.name))
print("Management IP : {}".format(management_ip_node3))
print()
from ipaddress import ip_address, IPv4Address
def validIPAddress(IP: str) -> str:
try:
return "IPv4" if type(ip_address(IP)) is IPv4Address else "IPv6"
except ValueError:
return "Invalid"
import paramiko
management_ip = management_ip_node3
key = paramiko.RSAKey.from_private_key_file(ssh_key_file_priv)
bastion=paramiko.SSHClient()
bastion.set_missing_host_key_policy(paramiko.AutoAddPolicy())
bastion.connect(bastion_public_addr, username=bastion_username, key_filename=bastion_key_filename)
bastion_transport = bastion.get_transport()
if validIPAddress(management_ip) == 'IPv4':
src_addr = (bastion_private_ipv4_addr, 22)
elif validIPAddress(management_ip) == 'IPv6':
src_addr = (bastion_private_ipv6_addr, 22)
else:
print('Management IP Invalid: {}'.format(management_ip))
dest_addr = (management_ip, 22)
bastion_channel = bastion_transport.open_channel("direct-tcpip", dest_addr, src_addr)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy())
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(management_ip,username=username,pkey = key, sock=bastion_channel)
stdin, stdout, stderr = client.exec_command('sudo ip addr add 192.168.10.53/24 dev eth1')
stdin, stdout, stderr = client.exec_command('ifconfig eth1')
print (str(stdout.read(),'utf-8').replace('\\n','\n'))
###Output
_____no_output_____
###Markdown
Delete Slice
###Code
return_status, result = slice_manager.delete(slice_object=slice)
print("Response Status {}".format(return_status))
print("Response received {}".format(result))
###Output
_____no_output_____ |
aPY.ipynb | ###Markdown
###Code
import torch
import torch.nn as nn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from torch.autograd import Variable
from copy import deepcopy
from sklearn.preprocessing import normalize
import glob, os
class encoder(nn.Module):
def __init__(self):
super(encoder, self).__init__()
self.fc1 = torch.nn.Linear(2048, 1000)
self.fc2 = torch.nn.Linear(1000, 500)
self.fc3 = torch.nn.Linear(500, 100)
self.rel = torch.nn.ReLU()
def forward(self, x):
x = self.fc1(x)
x = self.rel(x)
x = self.fc2(x)
x = self.rel(x)
x = self.fc3(x)
return x
class decoder(nn.Module):
def __init__(self):
super(decoder, self).__init__()
self.n_e = 64
self.n_y = 32
self.fc1 = torch.nn.Linear(50 + self.n_e + self.n_y, 500)
self.fc2 = torch.nn.Linear(500, 1000)
self.fc3 = torch.nn.Linear(1000, 2048 + 32 + 64)
self.rel = torch.nn.ReLU()
def forward(self, x):
x = self.fc1(x)
x = self.rel(x)
x = self.fc2(x)
x = self.rel(x)
x = self.fc3(x)
x_out = x[:, :2048]
y_out = x[:, 2048: 2048 + 32]
em_out = x[:, (2048 + 32):]
return x_out, y_out, em_out
class VAE(nn.Module):
def __init__(self, eps):
super(VAE, self).__init__()
self.en = encoder()
self.de = decoder()
self.eps = eps
def forward(self, x, one_hot, cls_att):
#print(x.shape, 'aa')
x = self.en(x)
mu = x[:, :50]
logvar = x[:, 50:]
std = torch.exp(0.5 * logvar)
z = mu + self.eps * std
z1 = torch.cat((z, one_hot), axis = 1)
z1 = torch.cat((z1, cls_att), axis = 1)
return self.de(z1), mu, logvar
class private(nn.Module):
def __init__(self, eps):
super(private, self).__init__()
self.task = torch.nn.ModuleList()
self.eps = eps
for _ in range(4):
self.task.append(VAE(self.eps))
def forward(self, x, one_hot, cls_att, task_id):
return self.task[task_id].forward(x, one_hot, cls_att)
class NET(nn.Module):
def __init__(self, eps):
super(NET, self).__init__()
self.eps = eps
#self.shared = VAE(self.eps)
self.private = private(self.eps)
#self.fc1 = torch.nn.Linear(4096, 2048)
self.head = torch.nn.ModuleList()
for _ in range(4):
self.head.append(
nn.Sequential(
nn.Linear(2048, 1000),
nn.Linear(1000, 500),
nn.Linear(500, 32)
)
)
def forward(self, x, one_hot, cls_att, task_id):
#s_x, s_mu, s_logvar = self.shared(x, one_hot, cls_att)
#print(s_x.shape)
p_out, p_mu, p_logvar = self.private(x, one_hot, cls_att, task_id)
#x = torch.cat((s_x, p_x), axis = 1)
#x = self.fc1(x)
return self.head[task_id].forward(x), (p_out, p_mu, p_logvar)
def common_features(self, z, task_id):
x_p, _, _ = self.private.task[task_id].de(z)
#x_s = self.shared.de(z)
#x = torch.cat((x_s, x_p), axis = 1)
return x_p #self.fc1(x)
path = 'FolderPath'
train_data_path = path + '/trainData'
train_label_path = path + '/trainLabels'
train_attr_path = path + '/trainAttributes'
test_data_path = path + '/testData'
test_label_path = path + '/testLabels'
test_attr_path = path + '/testAttributes'
attributes_path = path + '/dataAttributes'
def dataprocess(data_path):
with open(data_path, 'rb') as fopen:
#contents = np.load(fopen, allow_pickle=True, encoding='bytes')
contents = np.load(fopen, allow_pickle=True, encoding='latin1')
return contents
trainData1 = dataprocess(train_data_path)
trainLabels1 = dataprocess(train_label_path)
trainLabelsVectors1 = dataprocess(train_attr_path)
testData1 = dataprocess(test_data_path)
testLabels1 = dataprocess(test_label_path)
testlabelsvectors1 = dataprocess(test_attr_path)
ATTR = dataprocess(attributes_path)
class CLASSIFIER(nn.Module):
def __init__(self):
super(CLASSIFIER, self).__init__()
self.fc1 = torch.nn.Linear(2048, 2000)
self.fc2 = torch.nn.Linear(2000, 200)
self.fc3 = torch.nn.Linear(200, 32)
self.drop = nn.Dropout(p = 0.2)
self.rel = torch.nn.ReLU()
def forward(self, x):
#print(x.shape, '254')
x = self.fc1(x)
x = self.rel(x)
x = self.fc2(x)
x = self.rel(x)
x = self.drop(x)
x = self.fc3(x)
return x
from sklearn.preprocessing import normalize
from sklearn.preprocessing import StandardScaler
import random
class CL_VAE():
def __init__(self):
super(CL_VAE, self).__init__()
self.batch_size = 64
self.num_classes = 32
self.build_model()
self.set_cuda()
self.criterion = torch.nn.CrossEntropyLoss()
self.recon = torch.nn.MSELoss()
#self.L1 = torch.nn.L1Loss()
self.L1 = torch.nn.MSELoss()
self.seen_acc = []
self.unseen_acc = []
self.hm_acc = []
self.overall_acc = []
def build_model(self):
self.eps = torch.randn(self.batch_size, 50)
self.eps = self.eps.cuda()
self.net = NET(self.eps)
pytorch_total_params = sum(p.numel() for p in self.net.parameters() if p.requires_grad)
print('pytorch_total_params:', pytorch_total_params)
def set_cuda(self):
self.net.cuda()
def VAE_loss(self, recon, mu, sigma):
kl_div = -0.5 * torch.sum(1 + sigma - mu.pow(2) - sigma.exp())
#print('kl_div', kl_div.item())
return recon + kl_div
def train(self, all_traindata, all_trainlabels, all_testdata, all_testlabels, all_train_attr, all_test_attr, all_attr, total_tasks):
replay_classes = []
for i in range(total_tasks):
traindata = torch.tensor(all_traindata[i])
trainlabels = torch.tensor(all_trainlabels[i])
testdata = torch.tensor(all_testdata[i])
testlabels = torch.tensor(all_testlabels[i])
train_attr = torch.tensor(all_train_attr[i], dtype = torch.float32)
test_attr = torch.tensor(all_test_attr[i])
attr = torch.tensor(all_attr)
#print(trainlabels, 'avfr')
replay_classes.append(sorted(list(set(trainlabels.numpy().tolist()))))
if i + 1 == 1:
self.train_task(traindata.float(), trainlabels, train_attr, i)
#replay_classes.append(sorted(list(set(trainlabels.detach().numpy().tolist()))))
else:
num_gen_samples = 50
#z_dim = 108
for m in range(i):
#print(replay_classes, 'replay_classes')
replay_trainlabels = []
for ii in replay_classes[m]:
for j in range(num_gen_samples):
replay_trainlabels.append(ii)
replay_trainlabels = torch.tensor(replay_trainlabels)
replay_trainlabels_onehot = self.one_hot(replay_trainlabels)
replay_attr = torch.tensor(attr[replay_trainlabels])
labels_attr = torch.cat((replay_trainlabels_onehot, replay_attr), axis = 1)
z = torch.randn(replay_trainlabels.shape[0], 50)
z_one_hot = torch.cat((z, labels_attr), axis = 1)
z_one_hot = z_one_hot.cuda()
replay_data = self.net.common_features(z_one_hot.float(), m).detach().cpu()
train_attr = torch.cat((replay_attr, train_attr), axis = 0)
traindata = torch.cat((replay_data, traindata), axis = 0)
trainlabels = torch.cat((replay_trainlabels, trainlabels))
testdata = torch.cat((testdata, torch.tensor(all_testdata[m])), axis = 0)
testlabels = torch.cat((testlabels, torch.tensor(all_testlabels[m])))
#print(sorted(list(set(testlabels.detach().numpy().tolist()))), 'aaa', i + 1)
self.train_task(traindata.float(), trainlabels, train_attr.float(), i)
testdata_unseen = []
testlabels_unseen = []
testdata_seen = []
testlabels_seen = []
for j in range(i + 1):
testdata_seen = testdata_seen + all_testdata[j]
testlabels_seen = testlabels_seen + all_testlabels[j]
for k in range(j + 1, total_tasks):
testdata_unseen = testdata_unseen + all_testdata[k]
testlabels_unseen = testlabels_unseen + all_testlabels[k]
all_labels = sorted(list(set(testlabels_seen))) + sorted(list(set(testlabels_unseen)))
num_samples = 150
labels_list = []
for label in all_labels:
for l in range(num_samples):
labels_list.append(label)
attr_labels = attr[labels_list]
labels_list = torch.tensor(labels_list, dtype = torch.int64)
labels_list_onehot = self.one_hot(labels_list)
#print(labels_list_onehot.shape, 'aa', attr_labels.shape)
attr_labels_onehot = torch.cat((labels_list_onehot, attr_labels), axis = 1)
noise = torch.randn(len(labels_list), 50)
noise_others = torch.cat((noise, attr_labels_onehot), axis = 1)
noise_others = noise_others.float().cuda()
#print(noise_others.shape, 'aaa')
pseudodata = self.net.common_features(noise_others, i)
test_seen = torch.tensor(testdata_seen)
testlabels_s = torch.tensor(testlabels_seen)
testlabels_us = torch.tensor(testlabels_unseen)
#print(test_seen.shape, test_unseen.shape, testlabels_s.shape, testlabels_us.shape)
scaler = StandardScaler()
pseudodata = torch.from_numpy(scaler.fit_transform(pseudodata.detach().cpu().numpy())).cuda()
test_seen = torch.from_numpy(scaler.transform(test_seen.detach().numpy()))
if i < total_tasks - 1:
test_unseen = torch.tensor(testdata_unseen)
test_unseen = torch.from_numpy(scaler.transform(test_unseen.detach().numpy()))
#pseudodata = torch.from_numpy(normalize(pseudodata.detach().cpu().numpy(), axis = 1)).cuda()
#test_seen = torch.from_numpy(normalize(pseudodata.detach().cpu().numpy), axis = 1).to(cuda)
else:
test_unseen = None
testlabels_us = None
self.class_train(i, pseudodata, labels_list.cuda(), test_seen, testlabels_s, test_unseen, testlabels_us)
def dataloader(self, x, y, attr = None):
#x = x.detach().numpy()
#length = x.shape[0]
length = x.size()[0]
indices = np.arange(length)
random.shuffle(indices)
new_x = x[indices]
new_y = y[indices]
if attr is not None:
new_attr = attr[indices]
return new_x, new_y, new_attr
else:
return new_x, new_y
#print(x.shape, dataloader(x, args))
def class_train(self, task_id, pseudodata, labels_list, test_seen, testlabels_s, test_unseen = None, testlabels_us = None):
pseudodata, labels_list = self.dataloader(pseudodata, labels_list)
#print(sorted(list(set(labels_list.detach().cpu().numpy()))), 'aaa')
self.CLASS = CLASSIFIER()
self.CLASS = self.CLASS.cuda()
class_opti = torch.optim.Adam(self.CLASS.parameters(), lr = 1e-4)
num_epochs = 25
batch_s = 64
num_iter = int(pseudodata.shape[0]/batch_s)
for e in range(num_epochs):
for i in range(num_iter):
self.CLASS.train()
self.CLASS.zero_grad()
batch_data = pseudodata[i * batch_s : (i + 1) * batch_s]
batch_label = labels_list[i * batch_s : (i + 1) * batch_s]
#print(batch_data.shape, '145')
out = self.CLASS(batch_data)
loss = self.criterion(out, batch_label)
loss.backward(retain_graph = True)
class_opti.step()
#print('Epoch:', e + 1, 'Loss:', loss.item())
_, pred_s = torch.max(self.CLASS(test_seen.float().cuda()), axis = 1)
if testlabels_us is not None:
_, pred_us = torch.max(self.CLASS(test_unseen.float().cuda()), axis = 1)
pred_us = pred_us.detach().cpu()
pred_s = pred_s.detach().cpu()
correct = {}
total = {}
for m in range(self.num_classes):
correct[m] = 0
total[m] = 0
for m in range(test_seen.shape[0]):
#print(testlabels_s[m].item(), '44') #break
if pred_s[m].item() == testlabels_s[m].item():
#print(testlabels_s[m], '44')
correct[testlabels_s[m].item()] += 1
total[testlabels_s[m].item()] += 1
acc1 = 0
acc2 = 0
num_s = 0
num_us = 0
seenclasses = sorted(list(set(testlabels_s.detach().cpu().numpy())))
for m in seenclasses:
acc1 += correct[m]*1/total[m]
num_s += 1
acc1 = acc1/num_s
self.seen_acc.append(acc1)
if testlabels_us is not None:
unseenclasses = sorted(list(set(testlabels_us.detach().cpu().numpy())))
for m in range(test_unseen.shape[0]):
if pred_us[m].item() == testlabels_us[m].item():
correct[testlabels_us[m].item()] += 1
total[testlabels_us[m].item()] += 1
for m in unseenclasses:
acc2 += correct[m]/total[m]
num_us += 1
acc2 = acc2/num_us
self.unseen_acc.append(acc2)
self.hm_acc.append((2 * self.unseen_acc[task_id] * self.seen_acc[task_id])/(self.seen_acc[task_id] + self.unseen_acc[task_id]))
self.overall_acc.append((len(testlabels_s) * self.seen_acc[task_id] + len(testlabels_us) * self.unseen_acc[task_id])/(len(testlabels_s) + len(testlabels_us)))
print('self.seen_acc:', np.mean(self.seen_acc))
print('self.unseen_acc:', np.mean(self.unseen_acc))
print('self.hm_acc:', np.mean(self.hm_acc))
def one_hot(self, labels):
matrix = torch.zeros(len(labels), self.num_classes)
rows = np.arange(len(labels))
matrix[rows, labels] = 1
return matrix
def model_save(self):
torch.save(self.net.state_dict(), os.path.join(self.net_path))
def train_task(self, traindata, trainlabels, train_attr, task_id):
traindata, trainlabels, train_attr = self.dataloader(traindata, trainlabels, train_attr)
net_opti = torch.optim.Adam(self.net.parameters(), lr = 1e-4)
num_iterations = int(traindata.shape[0]/self.batch_size)
num_epochs = 101
for e in range(num_epochs):
for i in range(num_iterations):
self.net.zero_grad()
self.net.train()
batch_data = traindata[i * self.batch_size : (i + 1)*self.batch_size]
batch_label = trainlabels[i * self.batch_size : (i + 1)*self.batch_size]
batch_train_attr = train_attr[i * self.batch_size : (i + 1)*self.batch_size]
batch_label_one_hot = self.one_hot(batch_label)
batch_data = batch_data.cuda()
batch_label = batch_label.cuda()
batch_label_one_hot = batch_label_one_hot.cuda()
batch_train_attr = batch_train_attr.cuda()
out, private_out = self.net(batch_data, batch_label_one_hot, batch_train_attr, task_id)
#s_x, s_mu, s_logvar = shared_out
p_out, p_mu, p_logvar = private_out
p_x, p_y, p_em = p_out
#print(p_y.shape, 'aa', p_em.shape)
#print(batch_label.shape, 'ap', batch_train_attr.shape)
#print(out.shape, '12', batch_label.shape, s_x.shape)
#p_y_onehot = self.one_hot(p_y) #detach
cross_en_loss = self.criterion(out, batch_label)
y_loss = self.L1(p_y, batch_label_one_hot)
#print(p_em.shape, batch_train_attr.shape)
em_loss = self.L1(p_em, batch_train_attr)
#s_recon = self.recon(batch_data, s_x)
p_recon = self.recon(batch_data, p_x)
#s_VAE_loss = self.VAE_loss(s_recon, s_mu, s_logvar)
p_VAE_loss = self.VAE_loss(p_recon, p_mu, p_logvar)
all_loss = cross_en_loss + p_VAE_loss #+ y_loss + em_loss#+ s_VAE_loss
all_loss.backward(retain_graph=True)
net_opti.step()
#print('epoch:', e + 1, 'task_loss', cross_en_loss.item(), 'p_VAE', p_VAE_loss.item())
import time
model = CL_VAE()
st = time.time()
model.train(trainData1, trainLabels1, testData1, testLabels1, trainLabelsVectors1, testlabelsvectors1, ATTR, 4)
en = time.time()
print("It takes:", en - st, 'seconds')
###Output
_____no_output_____ |
challenges/ibm-quantum/africa-2021/lab3/lab3.ipynb | ###Markdown
IBM Quantum Challenge Africa: Quantum Chemistry for HIV Table of Contents| Walk-through ||:-||[Preface](preface)||[Introduction](intro)||[Step 1 : Defining the Molecular Geometry](step_1)||[Step 2 : Calculating the Qubit Hamiltonian](step_2)||[Step 2a: Constructing the Fermionic Hamiltonion](step_3)||[Step 2b: Getting Ready to Convert to a Qubit Hamiltonian](step_2b)||[Step 3 : Setting up the Variational Quantum Eigensolver (VQE)](step_3)||[Step 3a: The V in VQE (i.e. the Variational form, a Trial state)](step_3a)||[Step 3b: The Q in VQE: the Quantum environment](step_3b)||[Step 3c: Initializing VQE](step_3c)||[Step 4 : Solving for the Ground-state](step_4)||||[The HIV Challenge](challenge)||[1. Refining Step 1: Varying the Molecule](refine_step_1)||[2. Refining Step 2: Reducing the quantum workload](refine_step_2)||[3. Refining Step 4: Energy Surface](refine_step_4)||[4. Refining Step 3a](refine_step_3a)||Exercises||[Exercise 3a: Molecular Definition of Macromolecule with Blocking Approach](exercise_3a)||[Exercise 3b: Classical-Quantum Treatment Conceptual Questions (Multiple-Choice)](exercise_3b)||[Exercise 3c: Energy Landscape, To bind or not to bind?](exercise_3c)||[Exercise 3d: The effect of more repetitions](exercise_3d)||[Exercise 3e: Open-ended: Find the best hardware_inspired_trial to minimize the Energy Error for the Macromolecule](exercise_3e)||[Quantum Chemistry Resources](qresource)|Preface**HIV is a virus that has presented an immense challenge for public health, globally**. The ensuing disease dynamics touch on multiple societal dimensions including nutrition, access to health, education and research funding. To compound the difficulties, the virus mutates rapidly with different strains having different geographic footprints. In particular, the HIV-1-C and HIV-2 strains predominate mostly in Africa. Due to disparities in funding, research for treatments of the African strains lags behind other programmes. African researchers are striving to address this imbalance and should consider adding the latest technologies such as quantum computing to their toolkits.**Quantum computing promises spectacular improvements in drug-design**. In particular, in order to design new anti-retrovirals it is important to perform **chemical simulations** to confirm that the anti-retroviral binds with the virus protein. Such simulations are notoriously hard and sometimes ineffective on classical supercomputers. Quantum computers promise more accurate simulations allowing for a better drug-design workflow.In detail: anti-retrovirals are drugs that bind with and block a virus protein, called protease, that cleaves virus polyproteins into smaller proteins, ready for packaging. The protease can be thought of as a chemical scissor. The anti-retroviral can be thought of as a sticky obstacle that disrupts the ability of the scissor to cut. With the protease blocked, the virus cannot make more copies of itself.Mutations in the viral protease changes the binding propensity of a given anti-retroviral. 
Hence, when a mutation occurs and an anti-retroviral no longer binds well, the goal becomes to adjust the anti-retroviral molecule to again bind strongly. **The main goal of this challenge is to explore whether a toy anti-retroviral molecule binds with a toy virus protease.** Along the way, this challenge introduces **state-of-the-art hybrid classical-quantum embedded chemistry modelling** allowing the splitting of the work-load between classical approximations and more accurate quantum calculations. Finally, you need to tweak the setup of the quantum chemistry algorithm (without having to understand the nuts and bolts of quantum computing) to achieve the best performance for ideal quantum computing conditions. *A video explaining how HIV infects and how anti-retroviral treatment works*:
###Code
from IPython.display import display, YouTubeVideo
YouTubeVideo('cSNaBui2IM8')
###Output
_____no_output_____
###Markdown
Walk-through: Calculating the Ground-state Energy for the Simplest Molecule in the Universe *Import relevant packages*
###Code
from qiskit import Aer
from qiskit_nature.drivers import PySCFDriver, UnitsType, Molecule
from qiskit_nature.problems.second_quantization.electronic import ElectronicStructureProblem
from qiskit_nature.mappers.second_quantization import JordanWignerMapper, BravyiKitaevMapper
from qiskit_nature.converters.second_quantization import QubitConverter
from qiskit_nature.transformers import ActiveSpaceTransformer
from qiskit_nature.algorithms import GroundStateEigensolver, BOPESSampler
from qiskit.algorithms import NumPyMinimumEigensolver
from qiskit.utils import QuantumInstance
from qiskit_nature.circuit.library.ansatzes import UCCSD
from qiskit_nature.circuit.library.initial_states import HartreeFock
from qiskit.circuit.library import TwoLocal
from qiskit.algorithms import VQE
from qiskit.algorithms.optimizers import COBYLA
from functools import partial as apply_variation_to_atom_pair
import numpy as np
import matplotlib.pyplot as plt
###Output
/opt/conda/lib/python3.8/site-packages/pyscf/lib/misc.py:47: H5pyDeprecationWarning: Using default_file_mode other than 'r' is deprecated. Pass the mode to h5py.File() instead.
h5py.get_config().default_file_mode = 'a'
###Markdown
IntroductionIn the HIV Challenge, we are tasked with investigating whether the toy anti-retroviral molecule binds with and therefore, disrupts the toy protease molecule. Successful binding is determined by a lower total ground-state energy for the molecules when they are close together (forming a single macromolecule) compared to far apart.Total ground-state energy refers to the sum of the energies concerning the arrangement of the electrons and the nuclei. The nuclear energy is easy to calculate classically. It is the energy of the electron distribution (i.e. molecular spin-orbital occupation) that is extremely difficult and requires a quantum computer.We start with a walk-through tutorial, where we calculate the ground-state energy of a simple molecule and leave the more complicated set-up to the challenge section. The ground-state of a molecule in some configuration consists of the locations of the nuclei, together with some distribution of electrons around the nuclei. The nucleus-nucleus, nuclei-electron and electron-electron forces/energy of attraction and repulsion are captured in a matrix called the **Hamiltonian**. Since the nuclei are relatively massive compared to the electrons, they move at a slower time-scale than the electrons. This allows us to split the calculation into two parts: placing the nuclei and calculating the electron distribution, followed by moving the nuclei and recalculating the electron distribution until a minimum total energy distribution is reached: Algorithm: Find_total_ground_statePlace nuclei Repeat until grid completed or no change in total_energy: - calculate electronic ground-state - total_energy = (nuclei repulsion + electronic energy) - move nuclei (either in grid or following gradient)return total_energy In the walk-through, we simply fix the nuclei positions; however, later, in the challenge section, we allow for a varying one-dimensional intermolecular distance between the anti-retroviral and the protease molecules, which represents the anti-retroviral approaching the protease molecule in an attempt to bind. Step 1: Defining the Molecular Geometry For this walk-through, we work with the simplest non-trivial molecule possible: H$_2$, the hydrogen gas molecule.*The first thing to do is to fix the location of each nucleus. This is specified as a python list of nuclei, where each nucleus (as a list) contains a string corresponding to the atomic species and its 3D co-ordinates (as another list). We also specify the overall charge, which tells Qiskit to automatically calculate the number of needed electrons to produce that charge:*
###Code
hydrogen_molecule = Molecule(geometry=
[['H', [0., 0., 0.]],
['H', [0., 0., 0.735]]],
charge=0, multiplicity=1)
###Output
_____no_output_____
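(Not needed for this fixed-geometry walk-through, but worth noting: the otherwise-unused `apply_variation_to_atom_pair` import above lets a `Molecule` be given a degree of freedom, so that the H-H distance can be varied later. The following is only a sketch, assuming `Molecule.absolute_stretching` and the `degrees_of_freedom` argument are available in this Qiskit Nature version.)

```python
# Sketch: the same molecule, but with a stretchable H-H bond for later energy scans.
molecular_variation = Molecule.absolute_stretching  # assumed available in this version
specific_molecular_variation = apply_variation_to_atom_pair(molecular_variation,
                                                            atom_pair=(1, 0))
stretchable_hydrogen = Molecule(geometry=[['H', [0., 0., 0.]],
                                          ['H', [0., 0., 0.735]]],
                                charge=0, multiplicity=1,
                                degrees_of_freedom=[specific_molecular_variation])
```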
###Markdown
Step 2: Calculating the Qubit Hamiltonian Once nuclei positions are fixed (the nucleus-nucleus forces are temporarily irrelevant), the only part of the Hamiltonian that then needs to be calculated on the quantum computer is the detailed electron-electron interaction. The nuclei-electron and a rough mean field electron-electron interaction can be pre-computed as *allowed molecular orbitals* on a classical computer via the, so called, Hartree-Fock approximation. With these allowed molecular orbitals and their pre-calculated overlaps, Qiskit automatically produces an interacting electron-electron **fermionic molecular-orbital Hamiltonian** (called Second Quantization). The molecular orbital and overlap pre-calculation are provided by classical packages, e.g. PySCF, and connected to Qiskit via a so-called *driver*, in particular, we use the PySCFDriver. Step 2a: Constructing the Fermionic Hamiltonion *We specify the driver to the classical software package that is to be used to calculate the resulting orbitals of the provided molecule after taking into account the nuclei-electron and mean-field interactions. The `basis` option selects the basis set in which the molecular orbitals are to be expanded in. `sto3g` is the smallest available basis set:*
###Code
molecular_hydrogen_orbital_maker = PySCFDriver(molecule=hydrogen_molecule, unit=UnitsType.ANGSTROM, basis='sto3g')
###Output
_____no_output_____
###Markdown
*Qiskit provides a helpful Class named the ElectronicStructureProblem, which calls the driver in the right way to construct the molecular orbitals. We initialise ElectronicStructureProblem with the driver (which already has the molecular information stored in it from the previous step):*
###Code
hydrogen_fermionic_hamiltonian = ElectronicStructureProblem(molecular_hydrogen_orbital_maker)
###Output
_____no_output_____
###Markdown
*Here, we instruct the ElectronicStructureProblem object to go ahead and create the fermionic molecular-orbital Hamiltonian (which gets stored internally):*
###Code
hydrogen_fermionic_hamiltonian.second_q_ops()
print("Completed running classical package.\nFermionic molecular-orbital Hamiltonian calculated and stored internally.")
print("An example of HF info available: Orbital Energies", hydrogen_fermionic_hamiltonian._molecule_data_transformed.orbital_energies)
###Output
Completed running classical package.
Fermionic molecular-orbital Hamiltonian calculated and stored internally.
An example of HF info available: Orbital Energies [-0.58062892 0.67633625]
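(Optional peek, a sketch: `second_q_ops()` returns a list of second-quantized operators, with the electronic-energy Hamiltonian first; for H$_2$ in the minimal basis it is small enough to print.)

```python
# Sketch: inspect the fermionic Hamiltonian that was just built.
second_q_operators = hydrogen_fermionic_hamiltonian.second_q_ops()
print(second_q_operators[0])  # the electronic-energy operator over the 4 spin orbitals
```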
###Markdown
(If this step is not run explicitly, and its outputs are not used in an intermediary step, the final ground_state solving step would run it automatically.) Step 2b: Getting Ready to Convert to a Qubit Hamiltonian Above, *fermionic* is a term to describe the behaviour of electrons (having an anti-symmetric wave-function obeying the Pauli Exclusion principle). In order to use the quantum computer we need to map the electrons (which exhibit fermionic behavior) to the quantum computer's qubits (which have closely related spin behaviour: Pauli Exclusion but not necessarily anti-symmetric). This mapping is a generic process, independent of the driver above. There are multiple mapping methods available, each with pros and cons, and constitutes something to experiment with. *For now, we select the simplest qubit mapper/converter called the Jordan-Wigner Mapper:*
###Code
map_fermions_to_qubits = QubitConverter(JordanWignerMapper())
# e.g. alternative:
# map_fermions_to_qubits = QubitConverter(BravyiKitaevMapper())
###Output
_____no_output_____
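(The ground-state solver below applies the mapper for us, but as a sketch the conversion can also be done explicitly, which shows how many qubits the problem needs.)

```python
# Sketch: map the fermionic Hamiltonian to a qubit operator by hand.
qubit_hamiltonian = map_fermions_to_qubits.convert(
    hydrogen_fermionic_hamiltonian.second_q_ops()[0])
print('Number of qubits required:', qubit_hamiltonian.num_qubits)
```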
###Markdown
(Note, we have just chosen the mapper above, it has not yet been applied to the fermionic Hamiltonian.) Step 3: Setting up the Variational Quantum Eigensolver (VQE)Now that we have defined the molecule and its mapping onto a quantum computer, we need to select an algorithm to solve for the ground state. There are two well-known approaches: Quantum Phase Estimation (QPE) and VQE. The first requires fault-tolerant quantum computers that have not yet been built. The second is suitable for current day, noisy **depth**-restricted quantum computers, because it is a hybrid quantum-classical method with short-depth quantum circuits. By *depth* of the circuit, it suffices to know that quantum computers can only be run for a short while, before noise completely scrambles the results.Therefore, for now, we only explore the VQE method. Furthermore, VQE offers many opportunities to tweak its configuration; thus, as an end-user you gain experience in quantum black-box tweaking. VQE is an algorithm for finding the ground-state of a molecule (or any Hamiltonian in general). It is a hybrid quantum-classical algorithm, which means that the algorithm consists of two interacting stages, a quantum stage and a classical stage. During the quantum stage, a trial molecular state is created on the quantum computer. The trial state is specified by a collection of **parameters** which are provided and adjusted by the classical stage. After the trial state is created, its energy is calculated on the quantum computer (by a few rounds of quantum-classical measurements). The result is finally available classically. At this stage, a classical optimization algorithm looks at the previous energy levels and the new energy level and decides how to adjust the trial state parameters. This process repeats until the energy essentially stops decreasing. The output of the whole algorithm is the final set of parameters that produced the winning approximation to the ground-state and its energy level. Step 3a: The V in VQE (i.e. the Variational form, a Trial state)VQE works by 'searching' for the electron orbital occupation distribution with the lowest energy, called the ground-state. The quantum computer is repeatedly used to calculate the energy of the search trial state.The trial state is specified by a collection of (randomly initialized) parameters that move the state around, in our search for the ground-state (we're minimizing the energy cost-function). The form of the 'movement' is something that can be tweaked (i.e., the definition of the structure of the *ansatz*/trial). There are two broad approaches we could follow. The first, let's call it *Chemistry-Inspired Trial-states*, is to use domain knowledge of what we expect the ground-state to look like from a chemistry point of view and build that into our trial state. The second, let's call it *Hardware-Inspired Trial-states*, is to simply try and create trial states that have as wide a reach as possible while taking into account the architecure of the available quantum computers. *Chemistry-Inspired Trial-states*Since chemistry gives us domain-specific prior information (e.g., the number of orbitals and electrons and the actual Hartree-Fock approximation), it makes sense to guide the trial state by baking this knowledge into the form of the trial. *From the HF approximation we get the number of orbitals and from that we can calculate the number of spin orbitals:*
###Code
hydrogen_molecule_info = hydrogen_fermionic_hamiltonian.molecule_data_transformed
num_hydrogen_molecular_orbitals = hydrogen_molecule_info.num_molecular_orbitals
num_hydrogen_spin_orbitals = 2 * num_hydrogen_molecular_orbitals
###Output
_____no_output_____
###Markdown
*Furthermore, we can also extract the number of electrons (spin up and spin down):*
###Code
num_hydrogen_electrons_spin_up_spin_down = (hydrogen_molecule_info.num_alpha, hydrogen_molecule_info.num_beta)
###Output
_____no_output_____
###Markdown
*With the number of spin orbitals, the number of electrons able to fill them and the mapping from fermions to qubits, we can construct an initial quantum computing state for our trial state:*
###Code
hydrogen_initial_state = HartreeFock(num_hydrogen_spin_orbitals,
num_hydrogen_electrons_spin_up_spin_down,
map_fermions_to_qubits)
###Output
_____no_output_____
###Markdown
*Finally, Qiskit provides a Class (Unitary Coupled Cluster Single and Double excitations, `UCCSD`) that takes the above information and creates a parameterised state inspired by the HF approximation, that can be iteratively adjusted in our attempt to find the ground-state:*
###Code
hydrogen_chemistry_inspired_trial = UCCSD(map_fermions_to_qubits,
num_hydrogen_electrons_spin_up_spin_down,
num_hydrogen_spin_orbitals,
initial_state=hydrogen_initial_state)
###Output
_____no_output_____
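(A quick sketch to gauge the size of this chemistry-inspired trial state; decomposing the circuit forces its lazy build so the parameter count and depth can be read off.)

```python
# Sketch: how large the UCCSD trial state is.
built_uccsd = hydrogen_chemistry_inspired_trial.decompose()  # force the lazy build
print('UCCSD trial-state parameters:', built_uccsd.num_parameters)
print('UCCSD circuit depth:', built_uccsd.depth())
```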
###Markdown
*Hardware-Inspired Trial-states*The problem with the above "chemistry-inspired" trial-states is that they are quite deep, quickly using up the available depth of current-day quantum computers. A potential solution is to forgo this chemistry knowledge and try to represent arbitrary states with trial states that are easy to prepare and parametrically "move" around on current hardware. There are two quantum operations that can be used to try and reach arbitrary states: mixing (our term for *conditional sub-space rotation*) and rotating (*unconditional rotation*). Detailed knowledge of how these operations and their sub-options work is not really needed, especially because it is not immediately obvious which settings produce the best results. Mixing (also called Entanglement maps)There is a set of available mixing strategies that you may experiment with. This is specified with two arguments, *`entanglement`* (choosing what to mix) and *`entanglement_blocks`* (choosing how to mix):Possible *`entanglement`* values: `'linear'`, `'full'`, `'circular'`, `'sca'`Possible *`entanglement_blocks`* values: `'cz'`, `'cx'`For our purposes, it is acceptable to simply choose the first option for each setting. RotationThere is a set of available *parameterized* rotation strategies. The rotation strategies are specified as a single argument, *`rotation_blocks`*, in the form of a list of any combination of the following possibilities:Possible *`rotation_blocks`*: `'ry'`, `'rx'`,`'rz'`,`'h'`, ...Typically, this is the only place that parameters are introduced in the trial state. One parameter is introduced for every rotation, corresponding to the angle of rotation around the associated axis. (Note, `'h'` does not have any parameters and so cannot be selected alone.)Again, for our purposes, an acceptable choice is the first option alone in the list. *Qiskit provides a Class called `TwoLocal` for creating random trial states by local operations only. The number of **rounds** of the local operations is specified by the argument `reps`:*
###Code
hardware_inspired_trial = TwoLocal(rotation_blocks = ['ry'], entanglement_blocks = 'cz',
entanglement='linear', reps=2)
###Output
_____no_output_____
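###Markdown
*If you are curious what this hardware-inspired ansatz looks like before it is handed to VQE, the optional sketch below fixes its width and inspects it. It builds a throwaway copy (named `inspection_trial` purely for illustration) and assumes 4 qubits, the size the hydrogen problem above maps to:*
###Code
# Throwaway copy of the hardware-inspired ansatz, just for inspection (4 qubits assumed)
inspection_trial = TwoLocal(rotation_blocks=['ry'], entanglement_blocks='cz',
                            entanglement='linear', reps=2)
inspection_trial.num_qubits = 4
print("Number of adjustable parameters:", inspection_trial.num_parameters)
print(inspection_trial.decompose().draw())
###Output
_____no_output_____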
###Markdown
(Note, this trial state does not depend on the molecule.) *Just for convenience, let's choose between the two approaches by assigning the choice to a variable:*
###Code
hydrogen_trial_state = hydrogen_chemistry_inspired_trial
# OR
# hydrogen_trial_state = hardware_inspired_trial
###Output
_____no_output_____
###Markdown
Step 3b: The Q in VQE: the Quantum environment Since VQE runs on a quantum computer, it needs information about this quantum stage. For testing purposes, this can even be a simulation, in the form of either noise-free or noisy simulations. Ultimately, we would want to run VQE on actual (albeit noisy) quantum hardware and hopefully, in the not-too-distant future, achieve results unattainable classically. For this challenge, let us pursue noise-free simulation only. Noise-Free Simulation*To set up a noise-free simulation:*
###Code
noise_free_quantum_environment = QuantumInstance(Aer.get_backend('statevector_simulator'))
###Output
_____no_output_____
###Markdown
Step 3c: Initializing VQE Qiskit provides a class called `VQE` that implements the VQE algorithm. *It is initialized in a generic way (without reference to the molecule or the Hamiltonian) and requires the two pieces of information from above: the trial state and the quantum environment:*
###Code
hydrogen_vqe_solver = VQE(ansatz=hydrogen_trial_state, quantum_instance=noise_free_quantum_environment)
###Output
_____no_output_____
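###Markdown
*To make the hybrid loop described in Step 3 concrete, here is a deliberately simplified, purely classical sketch of the VQE feedback loop. It is an illustration of the idea only; Qiskit's `VQE` class does all of this for us, with the energy evaluated on the quantum backend rather than by the toy cost function used here:*
###Code
# Purely classical sketch of the VQE feedback loop (illustration only, not Qiskit internals)
import numpy as np
from scipy.optimize import minimize

def trial_state_energy(parameters):
    # Stand-in for the quantum stage: in real VQE this prepares the parameterised
    # trial state on the quantum computer and measures its energy.
    return float(np.sum(np.cos(parameters)))

initial_parameters = np.random.uniform(0, 2 * np.pi, size=4)  # randomly initialised, as described above
outcome = minimize(trial_state_energy, initial_parameters, method='COBYLA')  # classical optimizer stage
print("Lowest energy found:", outcome.fun)
print("Winning parameters:", outcome.x)
###Output
_____no_output_____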
###Markdown
(Note, the vqe solver is only tailored to hydrogen if the trial state is the hydrogen_chemistry_inspired_trial.) Step 4: Solving for the Ground-state **Qiskit Nature provides a class called GroundStateEigensolver to calculate the ground-state of a molecule.**This class first gets initialised with information that is independent of any molecule. It can then be applied to specific molecules using the same generic setup.To initialise a GroundStateEigensolver object, we need to provide the two generic algorithmic sub-components from above, the mapping method (Step 2b) and the solving method (Step 3). For testing purposes, an alternative to the VQE solver is a classical solver (see numpy_solver below).
###Code
hydrogen_ground_state = GroundStateEigensolver(map_fermions_to_qubits, hydrogen_vqe_solver)
###Output
_____no_output_____
###Markdown
We are finally ready to solve for the ground-state energy of our molecule.We apply the GroundStateEigensolver to the fermionic Hamiltonian (Step 2a), which has the molecule (Step 1) encoded in it. The already specified mapper and VQE solver are then automatically applied for us to produce the (approximate) ground-state.
###Code
hydrogen_ground_state_info = hydrogen_ground_state.solve(hydrogen_fermionic_hamiltonian)
print(hydrogen_ground_state_info)
###Output
=== GROUND STATE ENERGY ===
* Electronic ground state energy (Hartree): -1.857275030145
- computed part: -1.857275030145
~ Nuclear repulsion energy (Hartree): 0.719968994449
> Total ground state energy (Hartree): -1.137306035696
=== MEASURED OBSERVABLES ===
0: # Particles: 2.000 S: 0.000 S^2: 0.000 M: -0.000
=== DIPOLE MOMENTS ===
~ Nuclear dipole moment (a.u.): [0.0 0.0 1.3889487]
0:
* Electronic dipole moment (a.u.): [0.0 0.0 1.38894841]
- computed part: [0.0 0.0 1.38894841]
> Dipole moment (a.u.): [0.0 0.0 0.00000029] Total: 0.00000029
(debye): [0.0 0.0 0.00000074] Total: 0.00000074
###Markdown
As you can see, we have calculated the ground-state energy of the electron distribution: about -1.857 Hartree.From the placement of the nuclei, we are also conveniently given the repulsion energy (a simple classical calculation).Finally, when it comes to the ground-state of the overall molecule, it is the total ground state energy that we are trying to minimise.So the next step would be to move the nuclei and recalculate the **total ground state energy** in search of the stable nuclei positions. To end our discussion, let us compare the quantum-calculated energy to an accuracy-equivalent (but slower) classical calculation.
###Code
#Alternative Step 3b
numpy_solver = NumPyMinimumEigensolver()
#Alternative Step 4
ground_state_classical = GroundStateEigensolver(map_fermions_to_qubits, numpy_solver)
hydrogen_ground_state_info_classical = ground_state_classical.solve(hydrogen_fermionic_hamiltonian)
hydrogen_energy_classical = hydrogen_ground_state_info_classical.computed_energies[0]
print("Ground-state electronic energy (via classical calculations): ", hydrogen_energy_classical, "Hartree")
###Output
Ground-state electronic energy (via classical calculations): -1.857275030145182 Hartree
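###Markdown
*As a quick bookkeeping check on the numbers above, the total ground state energy is simply the electronic ground state energy plus the (classically computed) nuclear repulsion energy; the values below are copied from the earlier printout:*
###Code
# Values copied from the GROUND STATE ENERGY printout above (in Hartree)
electronic_ground_state_energy = -1.857275030145
nuclear_repulsion_energy = 0.719968994449
total_ground_state_energy = electronic_ground_state_energy + nuclear_repulsion_energy
print("Total ground state energy (Hartree):", total_ground_state_energy)
###Output
_____no_output_____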
###Markdown
The agreement to so many decimal places tells us that, for this particular Hamiltonian, the VQE process is accurately finding the lowest eigenvalue (and interestingly, the ansatz/trial does not fail to capture the ground-state, probably because it spans the entire Hilbert space). However, when comparing to nature or very accurate classical simulations of $H_2$, we find that the energy is only accurate to about two decimal places, e.g. total energy from VQE: -1.137 Hartree vs a highly accurate classical simulation: -1.166 Hartree. The reason for this is that in our above treatment there are sources of modelling error, including the placement of the nuclei and a number of approximations that come with the Hartree-Fock expansion. For $H_2$ these can be addressed, but ultimately, in general, the trickier of these sources can never be fully handled because finding the perfect ground-state is QMA-complete, i.e. the quantum version of NP-complete (i.e. 'unsolvable' for certain Hamiltonians). Then again, nature itself is not expected to be finding this perfect ground-state, so future experimentation is needed to see how close a given quantum computing solution approximates nature's solution. Walk-through Finished *** The HIV ChallengeNow that we have completed the walk-through, we frame the challenge as the task of refining steps 1-4 while answering related questions. 1. Refining Step 1: Varying the MoleculeIn Step 1, we defined our molecule. For the challenge, we first need to define a new molecule, corresponding to our toy protease molecule (the *scissor*) with an approaching toy anti-retroviral (the *blocker*), forming a *macromolecule*. Secondly, we need to instruct Qiskit to vary the approach distance. Let's learn how to do the second step with the familiar hydrogen molecule. *Here is how to specify the type of molecular variation we are interested in (namely, changing the approach distance in absolute steps)*:
###Code
molecular_variation = Molecule.absolute_stretching
#Other types of molecular variation:
#molecular_variation = Molecule.relative_stretching
#molecular_variation = Molecule.absolute_bending
#molecular_variation = Molecule.relative_bending
###Output
_____no_output_____
###Markdown
*Here is how we specify which atoms the variation applies to. The numbers refer to the index of each atom in the geometric definition list. The first atom of the specified atom_pair is moved closer to the second atom, which is left in place:*
###Code
specific_molecular_variation = apply_variation_to_atom_pair(molecular_variation, atom_pair=(1, 0))
###Output
_____no_output_____
###Markdown
*Finally, here is how we alter the original molecular definition that you have already seen in the walk-through:*
###Code
hydrogen_molecule_stretchable = Molecule(geometry=
[['H', [0., 0., 0.]],
['H', [0., 0., 0.735]]],
charge=0, multiplicity=1,
degrees_of_freedom=[specific_molecular_variation])
###Output
_____no_output_____
###Markdown
If we wanted to test that the variation is working, we could manually specify a given amount of variation (Qiskit calls it a *perturbation*) and then see what the new geometry is:
###Code
hydrogen_molecule_stretchable.perturbations = [0.1]
###Output
_____no_output_____
###Markdown
(If the above were not specified, a perturbation of zero would be assumed, defaulting to the original geometry.)
###Code
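# Expected result (based on the explanation in the next cell): absolute_stretching moves atom index 1
# toward atom index 0 by the 0.1 Angstrom perturbation set above, so its z-coordinate goes from 0.735 to 0.635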
hydrogen_molecule_stretchable.geometry
###Output
_____no_output_____
###Markdown
Notice how only the second atom of our geometry list (index 1, specified first in the atom_pair) has moved closer to the other atom by the amount we specified. When it comes time to scan across different approach distances this is very helpfully automated by Qiskit. Specifying the Protease+Anti-retroviral Macromolecule ProteaseA real protease molecule is made up of two polypeptide chains of around one hundred amino-acids in each chain (the two chains are folded together), with neighbouring pairs connected by the so-called *peptide-bond*.For our toy protease molecule, we have decided to take inspiration from this peptide bond since it is the basic building structure holding successive amino acids in proteins together. It is one of the most important factors in determining the chemistry of proteins, including protein folding in general and the HIV protease's cleaving ability, in particular.To simplify the calculations, let us choose to focus on the O=C-N part of the molecule. We keep, and also add, enough hydrogen atoms to try and make the molecule as realistic as possible (indeed, HCONH$_2$, Formamide, is a stable molecule, which, incidentally, is an ionic solvent, so it does "cut" ionic bonds).Making O=C-N our toy protease molecule is an extreme simplification, but nevertheless biologically motivated.Here is our toy protease:```"O": (1.1280, 0.2091, 0.0000)"N": (-1.1878, 0.1791, 0.0000)"C": (0.0598, -0.3882, 0.0000)"H": (-1.3085, 1.1864, 0.0001)"H": (-2.0305, -0.3861, -0.0001)"H": (-0.0014, -1.4883, -0.0001)```Just for fun, you may imagine that this molecule is a pair of scissors, ready to cut the HIV master protein (Gag-Pol polyprotein), in the process of making copies of the HI virus: Anti-retroviralThe anti-retroviral is a molecule that binds with the protease to **inhibit/block the cleaving mechanism**. For this challenge, we select a single carbon atom to be our stand-in for the anti-retroviral molecule. MacromoleculeEven though the two molecules are separate in our minds, when they approach, they form a single macro-molecule, with the outer-electrons forming molecular orbitals around all the atoms.As explained in the walk-through, the quantum electronic distribution is calculated for fixed atom positions, thus we have to separately place the atoms. For the first and second task, let us fix the protease's co-ordinates and only vary the anti-retroviral's position along a straight line.We arbitrarily select a line of approach passing through a given point and approaching the nitrogen atom. This "blocking" approach tries to obstruct the scissor from cutting. If it "sticks", it's working and successfully disrupts the duplication efforts of the HIV. Exercise 3a: Molecular Definition of Macromolecule with Blocking ApproachConstruct the molecular definition and molecular variation to represent the anti-retroviral approaching the nitrogen atom, between the "blades": ``` "C": (-0.1805, 1.3955, 0.0000) ``` Write your answer code here: Create your molecule in the cell below. Make sure to name the molecule `macromolecule`.
###Code
## Add your code here
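# atom_pair=(6, 1): atom index 6 (the anti-retroviral carbon, listed last in the geometry below)
# is moved toward atom index 1 (the protease nitrogen), i.e. the "blocking" approach described above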
specific_molecular_variation = apply_variation_to_atom_pair(molecular_variation, atom_pair=(6, 1))
macromolecule = Molecule(geometry=[('O',[1.1280, 0.2091, 0.0000]),
('N',[-1.1878, 0.1791, 0.0000]),
('C', [0.0598, -0.3882, 0.0000]),
('H',[-1.3085, 1.1864, 0.0001]),
('H',[-2.0305, -0.3861, -0.0001]),
('H',[-0.0014, -1.4883, -0.0001]),
('C',[-0.1805, 1.3955, 0.0000]),],degrees_of_freedom=[specific_molecular_variation])##
###Output
_____no_output_____
###Markdown
To submit your molecule to the grader, run the cell below.
###Code
from qc_grader import grade_ex3a
grade_ex3a(molecule=macromolecule)
###Output
Submitting your answer for ex3/partA. Please wait...
Congratulations 🎉! Your answer is correct and has been submitted.
###Markdown
2. Refining Step 2: Reducing the quantum workload In Step 2, we constructed the qubit Hamiltonian. If we tried to apply Step 2 and beyond to our macromolecule above, the ground state calculation simulation would fail. The reason is that, since we specified a zero charge, Qiskit knows that it must work with 30 (= 2\*6+7+8+3\*1) electrons. After second quantization, this translates into, say, 60 spin-orbitals, which require 60 qubits. 60 qubits is beyond our ability to simulate classically and while there are IBM Quantum systems with more than 60 qubits available, the noise levels are currently too high to produce accurate results when using that many qubits. Thus, for the purpose of this Challenge we need to reduce the number of qubits. Fortunately, this is well-motivated from a chemistry point of view as well: the classical Hartree-Fock approximation for core-electrons is sometimes sufficient to obtain accurate chemical results. Doubly fortunately, Qiskit has just recently been extended to seamlessly allow for users to specify that certain electrons should receive quantum-computing treatment while the remaining electrons should be classically approximated. Even as more qubits come online, this facility may prove very useful in allowing near-term quantum computers to tackle very large molecules that would otherwise be out of reach. *Therefore, we next demonstrate how to instruct Qiskit to give a certain number of electrons quantum-computing treatment:*
###Code
macro_molecular_orbital_maker = PySCFDriver(molecule=macromolecule, unit=UnitsType.ANGSTROM, basis='sto3g')
split_into_classical_and_quantum = ActiveSpaceTransformer(num_electrons=2, num_molecular_orbitals=2)
macro_fermionic_hamiltonian = ElectronicStructureProblem(macro_molecular_orbital_maker, [split_into_classical_and_quantum])
###Output
_____no_output_____
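###Markdown
*As a rough sanity check (a hand calculation, not a Qiskit call): restricting the quantum treatment to 2 electrons roaming over 2 molecular orbitals leaves 2 * 2 = 4 spin orbitals, so under the Jordan-Wigner mapping from the walk-through only 4 qubits are needed instead of 60:*
###Code
# Back-of-envelope qubit count for the active space chosen above
# (assumption: one qubit per spin orbital, as with the Jordan-Wigner mapping used in the walk-through)
num_quantum_molecular_orbitals = 2
num_quantum_spin_orbitals = 2 * num_quantum_molecular_orbitals
print("Qubits needed after the active-space reduction:", num_quantum_spin_orbitals)
###Output
_____no_output_____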
###Markdown
Above, Qiskit provides a class called **ActiveSpaceTransformer** that takes in two arguments. The first is the number of electrons that should receive quantum-computing treatment (selected from the outermost electrons, counting inwards). The second is the number of orbitals to allow those electrons to roam over (around the so-called Fermi level). It is the second number that determines how many qubits are needed. Exercise 3b: Classical-Quantum Treatment Conceptual Questions (Multiple-Choice)Q1: Why does giving quantum treatment to outer electrons of the macromolecule first, make more heuristic sense?```A: Outer electrons have higher binding energies and therefore swing the ground state energy more, therefore requiring quantum treatment.B: Outer electrons exhibit more quantum interference because their orbitals are more spread out.C: Inner core-electrons typically occupy orbitals more straightforwardly, because they mostly orbit a single nucleus and therefore do not lower the energy much by interacting/entangling with outer electrons.```Q2: For a fixed number of quantum-treatment electrons, as you increase the number of orbitals that those electrons roam over (have access to), does the calculated ground-state energy approach the asymptotic energy from above or below?```A: The asymptotic energy is approached from above, because as you increase the possible orbitals that the electrons have access to, the lower the ground state could be.B: The asymptotic energy is approached from below, because as you increase the possible orbitals the more accurate is your simulation, adding energy that was left out before.C: The asymptotic energy is approached from below, because as you increase the possible orbitals that the electrons have access to, the lower the ground state could be.D: The asymptotic energy is approached from above, because as you increase the possible orbitals the more accurate is your simulation, adding energy that was left out before.``` **Uncomment your answers to these multiple choice questions in the code-cell below. Run the cell to submit your answers.**
###Code
from qc_grader import grade_ex3b
## Q1
# answer_for_ex3b_q1 = 'A'
# answer_for_ex3b_q1 = 'B'
# answer_for_ex3b_q1 = 'C'
##
answer_for_ex3b_q1 = 'C'
## Q2
# answer_for_ex3b_q2 = 'A'
# answer_for_ex3b_q2 = 'B'
# answer_for_ex3b_q2 = 'C'
# answer_for_ex3b_q2 = 'D'
##
answer_for_ex3b_q2 = 'A'
grade_ex3b(answer_for_ex3b_q1, answer_for_ex3b_q2)
###Output
Submitting your answer for ex3/partB. Please wait...
Congratulations 🎉! Your answer is correct and has been submitted.
###Markdown
3. Refining Step 4: Energy Surface In Step 4, we ran the ground_state solver on a given molecule once only and we haven't yet explained how to instruct Qiskit to vary the molecular geometry using the specification introduced above. As explained in the introduction, changing the nuclei positions and comparing the total energy levels, is a method for finding the nuclei arrangement with the lowest energy. If the lowest energy is **not** at "infinity", this corresponds to a "stable" bound state of the molecule at the energy minimum. The energy as a function of atomic separation is thus a crucial object of study. This function is called the **Born-Oppenheimer Potential Energy Surface (BOPES)**. Qiskit provides a helpful python Class that manages this process of varying the geometry and repeatedly calling the ground_state solver: **BOPESSampler**.Let's demonstrate BOPESSampler for the hydrogen molecule.*The only steps of the hydrogen molecule walk-through that need to be re-run are Steps 1 and 2a:*
###Code
hydrogen_stretchable_molecular_orbital_maker = PySCFDriver(molecule=hydrogen_molecule_stretchable, unit=UnitsType.ANGSTROM, basis='sto3g')
hydrogen_stretchable_fermionic_hamiltonian = ElectronicStructureProblem(hydrogen_stretchable_molecular_orbital_maker)
###Output
_____no_output_____
###Markdown
*Secondly, here is how to call the sampler:*
###Code
energy_surface = BOPESSampler(gss=hydrogen_ground_state, bootstrap=False) # same solver suffices, since the trial is the same
perturbation_steps = np.linspace(-0.5, 2, 25) # 25 equally spaced points from -0.5 to 2, inclusive.
energy_surface_result = energy_surface.sample(hydrogen_stretchable_fermionic_hamiltonian, perturbation_steps)
###Output
/opt/conda/lib/python3.8/site-packages/qiskit_nature/algorithms/pes_samplers/bopes_sampler.py:192: DeprecationWarning:
The VQE.optimal_params property is deprecated as of Qiskit Terra 0.18.0
and will be removed no sooner than 3 months after the releasedate.
This information is part of the returned result object and can be
queried as VQEResult.optimal_point.
optimal_params = self._gss.solver.optimal_params # type: ignore
###Markdown
*Thirdly, here is how to produce the famous energy landscape plot:*
###Code
def plot_energy_landscape(energy_surface_result):
if len(energy_surface_result.points) > 1:
plt.plot(energy_surface_result.points, energy_surface_result.energies, label="VQE Energy")
        plt.xlabel('Atomic distance deviation (Angstrom)')
plt.ylabel('Energy (hartree)')
plt.legend()
plt.show()
else:
print("Total Energy is: ", energy_surface_result.energies[0], "hartree")
print("(No need to plot, only one configuration calculated.)")
plot_energy_landscape(energy_surface_result)
###Output
_____no_output_____
###Markdown
For extra intuition, you may think of the energy landscape as a mountain, next to a valley, next to a plateau that a ball rolls on (the x co-ordinate of the ball corresponds to the separation between the two hydrogen atoms). If the ball is not rolling too fast down the plateau (right to left) it may settle in the valley. The ball slowly rolls down the plateau because the slope is positive (representing a force of attraction between the two hydrogen atoms). If the ball overshoots the minimum point of the valley, it meets the steep negative slope of the mountain and quickly rolls back (the hydrogen atoms repel each other).Notice the minimum is at zero. This is because we defined the hydrogen molecule's nuclei positions at the known ground state positions.By the way, if we had used the hardware_inspired_trial we would have produced a similar plot; however, it would have had bumps because the ansatz does not capture the electronic ground state equally well at different bond lengths. Exercise 3c: Energy Landscape, To bind or not to bind?The million-dollar question: Does our toy anti-retroviral bind and thus block the protease? - Search for the minimum from -0.5 to 5 for 30 points. - Give quantum-computing treatment to 2 electrons roaming over 2 orbitalsQ1. Submit the energy landscape for the anti-retroviral approaching the protease.Q2. Is there a clear minimum at a finite separation? Does binding occur?```A. Yes, there is a clear minimum at 0, so binding does occur.B. Yes, there is a clear minimum at infinity, so binding only happens at infinity.C. No, there is no clear minimum for any separation, so binding occurs because there is no separation.D. No, there is no clear minimum for any separation, so there is no binding.```(Don't preempt the answer. Furthermore, the answer might change for other approaches and other settings, so please stick to the requested settings.) *Feel free to use the following function, which collects the entire walk-through and refinements to Step 2 and 4. It takes in a Molecule (of refinement Step 1 type), the inputs for the other refinements and a boolean choice of whether to use VQE or the numpy solver:*
###Code
def construct_hamiltonian_solve_ground_state(
molecule,
num_electrons=2,
num_molecular_orbitals=2,
chemistry_inspired=True,
hardware_inspired_trial=None,
vqe=True,
perturbation_steps=np.linspace(-1, 1, 3),
):
"""Creates fermionic Hamiltonion and solves for the energy surface.
Args:
molecule (Union[qiskit_nature.drivers.molecule.Molecule, NoneType]): The molecule to simulate.
num_electrons (int, optional): Number of electrons for the `ActiveSpaceTransformer`. Defaults to 2.
num_molecular_orbitals (int, optional): Number of electron orbitals for the `ActiveSpaceTransformer`. Defaults to 2.
chemistry_inspired (bool, optional): Whether to create a chemistry inspired trial state. `hardware_inspired_trial` must be `None` when used. Defaults to True.
hardware_inspired_trial (QuantumCircuit, optional): The hardware inspired trial state to use. `chemistry_inspired` must be False when used. Defaults to None.
        vqe (bool, optional): Whether to use VQE to calculate the energy surface. Uses `NumPyMinimumEigensolver` if False. Defaults to True.
perturbation_steps (Union(list,numpy.ndarray), optional): The points along the degrees of freedom to evaluate, in this case a distance in angstroms. Defaults to np.linspace(-1, 1, 3).
Raises:
RuntimeError: `chemistry_inspired` and `hardware_inspired_trial` cannot be used together. Either `chemistry_inspired` is False or `hardware_inspired_trial` is `None`.
Returns:
qiskit_nature.results.BOPESSamplerResult: The surface energy as a BOPESSamplerResult object.
"""
# Verify that `chemistry_inspired` and `hardware_inspired_trial` do not conflict
if chemistry_inspired and hardware_inspired_trial is not None:
raise RuntimeError(
(
"chemistry_inspired and hardware_inspired_trial"
" cannot both be set. Either chemistry_inspired"
" must be False or hardware_inspired_trial must be none."
)
)
# Step 1 including refinement, passed in
# Step 2a
molecular_orbital_maker = PySCFDriver(
molecule=molecule, unit=UnitsType.ANGSTROM, basis="sto3g"
)
# Refinement to Step 2a
split_into_classical_and_quantum = ActiveSpaceTransformer(
num_electrons=num_electrons, num_molecular_orbitals=num_molecular_orbitals
)
fermionic_hamiltonian = ElectronicStructureProblem(
molecular_orbital_maker, [split_into_classical_and_quantum]
)
fermionic_hamiltonian.second_q_ops()
# Step 2b
map_fermions_to_qubits = QubitConverter(JordanWignerMapper())
# Step 3a
if chemistry_inspired:
molecule_info = fermionic_hamiltonian.molecule_data_transformed
num_molecular_orbitals = molecule_info.num_molecular_orbitals
num_spin_orbitals = 2 * num_molecular_orbitals
num_electrons_spin_up_spin_down = (
molecule_info.num_alpha,
molecule_info.num_beta,
)
initial_state = HartreeFock(
num_spin_orbitals, num_electrons_spin_up_spin_down, map_fermions_to_qubits
)
chemistry_inspired_trial = UCCSD(
map_fermions_to_qubits,
num_electrons_spin_up_spin_down,
num_spin_orbitals,
initial_state=initial_state,
)
trial_state = chemistry_inspired_trial
else:
if hardware_inspired_trial is None:
hardware_inspired_trial = TwoLocal(
rotation_blocks=["ry"],
entanglement_blocks="cz",
entanglement="linear",
reps=2,
)
trial_state = hardware_inspired_trial
# Step 3b and alternative
if vqe:
noise_free_quantum_environment = QuantumInstance(Aer.get_backend('statevector_simulator'))
solver = VQE(ansatz=trial_state, quantum_instance=noise_free_quantum_environment)
else:
solver = NumPyMinimumEigensolver()
# Step 4 and alternative
ground_state = GroundStateEigensolver(map_fermions_to_qubits, solver)
# Refinement to Step 4
energy_surface = BOPESSampler(gss=ground_state, bootstrap=False)
energy_surface_result = energy_surface.sample(
fermionic_hamiltonian, perturbation_steps
)
return energy_surface_result
###Output
_____no_output_____
###Markdown
Your answer The following code cells give a skeleton to call `construct_hamiltonian_solve_ground_state` and plot the results. Once you are confident with your results, submit them in the code-cell that follows.**Note: `construct_hamiltonian_solve_ground_state` will take some time to run (approximately 2 minutes). Do not worry if it doesn't return a result immediately.**
###Code
# Q1
# Calculate the energies
q1_energy_surface_result = construct_hamiltonian_solve_ground_state(
molecule=macromolecule,
num_electrons=2,
num_molecular_orbitals=2,
chemistry_inspired=True,
vqe=True,
perturbation_steps=np.linspace(-0.5, 5, 30),
)
# Plot the energies to visualize the results
plot_energy_landscape(q1_energy_surface_result)
## Q2
# answer_for_ex3c_q2 = 'A'
# answer_for_ex3c_q2 = 'B'
# answer_for_ex3c_q2 = 'C'
# answer_for_ex3c_q2 = 'D'
answer_for_ex3c_q2 = 'D'
###Output
_____no_output_____
###Markdown
Once you are happy with the results you have acquired, submit the energies and parameters for `construct_hamiltonian_solve_ground_state` in the following cell. Change the values for all parameters, except `energy_surface`, to have the same value that you used in your call of `construct_hamiltonian_solve_ground_state`
###Code
from qc_grader import grade_ex3c
grade_ex3c(
energy_surface=q1_energy_surface_result.energies,
molecule=macromolecule,
num_electrons=2,
num_molecular_orbitals=2,
chemistry_inspired=True,
hardware_inspired_trial=None,
vqe=True,
perturbation_steps=np.linspace(-0.5, 5, 30),
q2_multiple_choice=answer_for_ex3c_q2
)
###Output
Submitting your answer for ex3/partC. Please wait...
Congratulations 🎉! Your answer is correct and has been submitted.
###Markdown
4. Refining Step 3a The last refinement is a lesson in how black-box tweaking can improve results.In Step 3a, the hardware_inspired_trial is designed to run on actual current-day hardware. Recall this line from the walk-through:
###Code
hardware_inspired_trial = TwoLocal(rotation_blocks = ['ry'], entanglement_blocks = 'cz',
entanglement='linear', reps=2)
###Output
_____no_output_____
###Markdown
Let us get a feel for the `reps` (repetition) parameter. This parameter controls how many rounds of mix and rotate are applied in the trial state. In more detail: there is an initial round of rotations, after which a mixing step (often containing no parameters) and another round of rotations are repeated. Certain gates don't generate parameters (e.g. `h`, `cz`). Each round of rotations adds an extra set of parameters that the classical optimizer adjusts in the search for the ground state.Let's look again at the simple hydrogen molecule and compute the "ideal" lowest energy using the chemistry trial, the numpy solver and a single zero perturbation (i.e., no perturbations):
###Code
true_total_energy = construct_hamiltonian_solve_ground_state(
molecule=hydrogen_molecule_stretchable, # Step 1
num_electrons=2, # Step 2a
num_molecular_orbitals=2, # Step 2a
chemistry_inspired=True, # Step 3a
vqe=False, # Step 3b
perturbation_steps = [0]) # Step 4
plot_energy_landscape(true_total_energy)
###Output
Total Energy is: -1.137306035753395 hartree
(No need to plot, only one configuration calculated.)
###Markdown
We take this as the true value for the rest of our experiment.*Next, select `chemistry_inspired=False`, `vqe=True` and pass in a hardware trial with 1 round*:
###Code
hardware_inspired_trial = TwoLocal(rotation_blocks = ['ry'], entanglement_blocks = 'cz',
entanglement='linear', reps=1)
quantum_calc_total_energy = construct_hamiltonian_solve_ground_state(
molecule=hydrogen_molecule_stretchable, # Step 1
num_electrons=2, # Step 2a
num_molecular_orbitals=2, # Step 2a
chemistry_inspired=False, # Step 3a
hardware_inspired_trial=hardware_inspired_trial, # Step 3a
vqe=True, # Step 3b
perturbation_steps = [0]) # Step 4
plot_energy_landscape(quantum_calc_total_energy)
###Output
Total Energy is: -1.1169986828048528 hartree
(No need to plot, only one configuration calculated.)
###Markdown
*Notice the difference is small and positive:*
###Code
quantum_calc_total_energy.energies[0] - true_total_energy.energies[0]
###Output
_____no_output_____
###Markdown
*Let's see how many parameters are used to specify the trial state:*
###Code
total_number_of_parameters = len(hardware_inspired_trial._ordered_parameters)
print("Total number of adjustable parameters: ", total_number_of_parameters)
###Output
Total number of adjustable parameters: 8
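###Markdown
*A quick way to see where this count comes from, assuming each `ry` rotation contributes one parameter per qubit per rotation round and the `cz` blocks contribute none: with `reps` mix-and-rotate rounds after the initial rotation round, a `ry`-only `TwoLocal` ansatz on the 4 qubits of this hydrogen problem has `(reps + 1) * num_qubits` parameters:*
###Code
# Illustrative parameter count for the ry-only TwoLocal ansatz above (assumes cz adds no parameters)
num_qubits = 4   # the hydrogen problem above uses 4 spin orbitals -> 4 qubits
reps = 1
print("Expected number of adjustable parameters:", (reps + 1) * num_qubits)
###Output
_____no_output_____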
###Markdown
Exercise 3d: The effect of more repetitions Q1: Try reps equal to 1 (done for you) and 2 and compare the errors. What happens to the error? Does it increase, decrease, or stay the same?Be aware that: - VQE is a statistical algorithm, so run it a few times before observing the pattern. - Going beyond 2 may not continue the pattern. - Note that `reps` is defined in `TwoLocal`Q2: Check the total number of parameters for reps equal 1 and 2. How many parameters are introduced per round of rotations? Write your answer here: **Enter your answer to the first multiple choice question in the code-cell below and add your answer for Q2. Run the cell to submit your answers.**
###Code
from qc_grader import grade_ex3d
## Q1
# answer_for_ex3d_q1 = 'decreases'
# answer_for_ex3d_q1 = 'increases'
# answer_for_ex3d_q1 = 'stays the same'
##
answer_for_ex3d_q1 = 'decreases'
## Q2
answer_for_ex3d_q2 = 4
##
grade_ex3d(answer_for_ex3d_q1, answer_for_ex3d_q2)
###Output
Submitting your answer for ex3/partD. Please wait...
Congratulations 🎉! Your answer is correct and has been submitted.
###Markdown
Exercise 3e: Open-ended: Find the best hardware_inspired_trial to minimize the Energy Error for the Macromolecule Turning to the macromolecule again: using `chemistry_inspired=False`, `vqe=True`, `perturbation_steps = [0]`, a maximum of 8 qubits, and your own hardware_inspired_trial with any combination of options from the walk-through, find the lowest energy. Your answer to this exercise includes all parameters passed to `construct_hamiltonian_solve_ground_state` and the result object it returns. This exercise is scored based on how close your computed energy $E_{computed}$ is to the "true" minimum energy of the macromolecule $E_{true}$. This score is calculated as shown below, rounded to the nearest integer. $$\text{score} = -10 \times \log_{10}{\left(\left\lvert{\frac{E_{true} - E_{computed}}{E_{true}}}\right\rvert\right)}$$ Achieving a smaller error in your computed energy will increase your score. For example, if the true energy is -42.141 and you compute -40.0, you would have a score of 13. Use the following code cell to trial different `hardware_inspired_trial`s.
###Code
# Modify the following variables
num_electrons = 2
num_molecular_orbitals = 2
hardware_inspired_trial = TwoLocal(rotation_blocks = ['ry'], entanglement_blocks = 'cz', entanglement='linear', reps=5)
#
computed_macromolecule_energy_result = construct_hamiltonian_solve_ground_state(
molecule=macromolecule,
num_electrons=num_electrons,
num_molecular_orbitals=num_molecular_orbitals,
chemistry_inspired=False,
hardware_inspired_trial=hardware_inspired_trial,
vqe=True,
perturbation_steps=[0],
)
###Output
_____no_output_____
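###Markdown
*Before submitting, it can help to get a feel for the scoring formula above; here it is applied to the worked example from the exercise text (true energy -42.141, computed energy -40.0), which should give a score of about 13:*
###Code
import math
# Score formula from the exercise text, applied to its own worked example
true_energy = -42.141
computed_energy = -40.0
score = -10 * math.log10(abs((true_energy - computed_energy) / true_energy))
print("Score (rounded):", round(score))
###Output
_____no_output_____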
###Markdown
Once you are ready to submit your answer, run the following code cell to have your computed energy scored. You can submit multiple times.
###Code
from qc_grader import grade_ex3e
grade_ex3e(
energy_surface_result=computed_macromolecule_energy_result,
molecule=macromolecule,
num_electrons=num_electrons,
num_molecular_orbitals=num_molecular_orbitals,
chemistry_inspired=False,
hardware_inspired_trial=hardware_inspired_trial,
vqe=True,
perturbation_steps=[0],
)
###Output
Submitting your answer for ex3/partE. Please wait...
Congratulations 🎉! Your answer is correct and has been submitted.
Your score is 72.
###Markdown
---------------- Quantum Chemistry Resources*Videos*- *Quantum Chemistry I: Obtaining the Qubit Hamiltonian* - https://www.youtube.com/watch?v=2XEjrwWhr88- *Quantum Chemistry II: Finding the Ground States* - https://www.youtube.com/watch?v=_UW6puuGa5E - https://www.youtube.com/watch?v=o4BAOKbcd3o*Tutorials*- https://qiskit.org/documentation/nature/tutorials/01_electronic_structure.html - https://qiskit.org/documentation/nature/tutorials/03_ground_state_solvers.html - https://qiskit.org/documentation/nature/tutorials/05_Sampling_potential_energy_surfaces.html*Code References*- UCCSD : https://qiskit.org/documentation/stubs/qiskit.chemistry.components.variational_forms.UCCSD.html- ActiveSpaceTransformer: https://qiskit.org/documentation/nature/stubs/qiskit_nature.transformers.second_quantization.electronic.ActiveSpaceTransformer.html?highlight=activespacetransformerqiskit_nature.transformers.second_quantization.electronic.ActiveSpaceTransformer Licensing and notes:- All images used, with gratitude, are listed below with their respective licenses: - https://de.wikipedia.org/wiki/Datei:Teppichschere.jpg by CrazyD is licensed under CC BY-SA 3.0 - https://commons.wikimedia.org/wiki/File:The_structure_of_the_immature_HIV-1_capsid_in_intact_virus_particles.png by MarinaVladivostok is licensed under CC0 1.0 - https://commons.wikimedia.org/wiki/File:Peptidformationball.svg by YassineMrabet is licensed under the public domain - The remaining images are either IBM-owned, or hand-generated by the authors of this notebook.- HCONH2 (Formamide) co-ordinates kindly provided by the National Library of Medicine: - `National Center for Biotechnology Information (2021). PubChem Compound Summary for CID 713, Formamide. https://pubchem.ncbi.nlm.nih.gov/compound/Formamide.`- For further information about the Pauli exclusion principle:https://en.wikipedia.org/wiki/Pauli_exclusion_principle- We would like to thank collaborators, Prof Yasien and Prof Munro from Wits for extensive assistance.- We would like to thank all the testers and feedback providers for their valuable input.
###Code
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
###Output
_____no_output_____ |
Example_pipelines/CGI_example.ipynb | ###Markdown
Conservation-based Synthetic Lethal Search Introduction Rationale Use-cases:* Prioritize human candidate synthetic lethal interactions based on prior evidence of interaction in yeast SL screens* _de novo_ discovery of SL interactions ApproachThis notebook re-implements the approach outlined in Srivas et al. (2016) Usage:Add genes of interest to the "inputGenes" value, then run the next step.Example: inputGenes = "'DDX3X','DICER1','DROSHA','TNFRSF14','TRAF7','TSC1','POLG','FBXO11','PRDM1','RFWD3','AMER1','LZTR1','ATP2B3'" Workflow Overview Datasets Yeast Synthetic Lethal InteractionsCostanzo et al. (2016) Human to Yeast Ortholog MappingDetailed treatment is given in the accompanying notebook (Mapping human to yeast orthologs) Human Tumor Suppressor Genes References* Costanzo M, VanderSluis B, Koch EN, Baryshnikova A, Pons C, Tan G, Wang W, Usaj M, Hanchard J, Lee SD, Pelechano V, Styles EB, Billmann M, van Leeuwen J, van Dyk N, Lin ZY, Kuzmin E, Nelson J, Piotrowski JS, Srikumar T, Bahr S, Chen Y, Deshpande R, Kurat CF, Li SC, Li Z, Usaj MM, Okada H, Pascoe N, San Luis BJ, Sharifpoor S, Shuteriqi E, Simpkins SW, Snider J, Suresh HG, Tan Y, Zhu H, Malod-Dognin N, Janjic V, Przulj N, Troyanskaya OG, Stagljar I, Xia T, Ohya Y, Gingras AC, Raught B, Boutros M, Steinmetz LM, Moore CL, Rosebrock AP, Caudy AA, Myers CL, Andrews B, Boone C. **A global genetic interaction network maps a wiring diagram of cellular function.** Science. 2016 Sep 23;353(6306). pii: aaf1420. PubMed PMID: 27708008; PubMed Central PMCID: PMC5661885.* Srivas R, Shen JP, Yang CC, Sun SM, Li J, Gross AM, Jensen J, Licon K, Bojorquez-Gomez A, Klepper K, Huang J, Pekin D, Xu JL, Yeerna H, Sivaganesh V, Kollenstart L, van Attikum H, Aza-Blanc P, Sobol RW, Ideker T. **A Network of Conserved Synthetic Lethal Interactions for Exploration of Precision Cancer Therapy**. Mol Cell. 2016 Aug 4;63(3):514-25. doi:10.1016/j.molcel.2016.06.022.Epub 2016 Jul 21. PubMed PMID: 27453043; PubMed Central PMCID: PMC5209245. PreambleThis section describes how to set up the analysis environment appropriately, including Google Cloud Platform authentication and importing all the relevant Python libraries. Setup Analysis Environment
###Code
# This code block installs the dependencies, please run it only once, the first time you run this notebook
!pip3 install google-cloud-bigquery
!pip3 install matplotlib
!pip3 install plotly
# google cloud authentication
from google.cloud import bigquery
# import modules
import sys
import matplotlib.pyplot as plt
import pandas as pd
import scipy
from scipy import stats
import numpy as np
import json
import statsmodels.stats.multitest as multi
import matplotlib.pyplot as plt
import math
import ipywidgets as widgets
import plotly
import plotly.express as px
import pyarrow
# Users need to run the following command on their local machine or through the notebook.
# Make sure to install the Google Cloud SDK in the local environment. For more details on gcloud installation, see https://cloud.google.com/sdk/docs/install
!gcloud auth application-default login
# Choose the project to be used for bigquery
project_id='syntheticlethality'
client = bigquery.Client(project_id) # uses the project_id defined above; replace it with your own project ID
%load_ext google.cloud.bigquery
###Output
_____no_output_____
###Markdown
Define a set of cancer-relevant tumor suppressor genes (TSGs)In this workflow, the search for relevant synthetic lethal interactions is seeded by defining a set of tumor suppressor genes (TSGs) of interest. There are various strategies for obtaining such a list, here we give an example of mining the [COSMIC Cancer Gene Census](https://cancer.sanger.ac.uk/census) for TSG annotations and then prioritizing the list based on driver status or frequency of alteration in a cancer type of interest.If you want to get the SL interactions for genes of interest, please add the genes to "inputGenes".
###Code
query = '''
SELECT *
FROM `isb-cgc.COSMIC_v90_grch38.Cancer_Gene_Census`
WHERE Role_in_Cancer = "TSG"
'''
tsg = client.query(query).result().to_dataframe()
tsg.head()
# generate a list for inputGenes (Please go to the next block if you want to use your genes instead of tumor suppressor genes)
tumor_suppressor_genes = tsg["Gene_Symbol"].tolist()
inputGenes = ["'"+x+"'" for x in tumor_suppressor_genes]
inputGenes = ','.join(inputGenes)
inputGenes
# please skip this block if you want to keep using tumor suppressor genes as an input
#inputGenes = ""
###Output
_____no_output_____
###Markdown
Map Yeast Orthologs & Get SL interactions
###Code
sql = '''
WITH
--- Retrieve YeastSymbols mapped to HumanSymbols for the input genes
INPUT_H2Y AS (
SELECT YeastSymbol
FROM `syntheticlethality.gene_information.human2Yeast`
WHERE HumanSymbol IN (__INPUTGENES__) AND
AlgorithmsMatch >= __ALGORITHMCUTOFF__
),
--- Identify protein-protein interactions using the YeastSymbols (left match)
Yeast_ITX1 AS (
SELECT UPPER(Query_allele_name) AS Interactor1,
UPPER(Array_allele_name) AS Interactor2,
Genetic_interaction_score_____ AS Interaction_score,
P_value
FROM `syntheticlethality.CellMap.CellMap`
WHERE (Genetic_interaction_score_____ < __SCORECUTOFF__ AND P_value < __PvalueCUTOFF__) AND
(UPPER(Query_allele_name) IN (SELECT YeastSymbol FROM INPUT_H2Y))
),
--- Identify protein-protein interactions using the YeastSymbols (right match)
Yeast_ITX2 AS (
SELECT UPPER(Array_allele_name) AS Interactor1,
UPPER(Query_allele_name) AS Interactor2,
Genetic_interaction_score_____ AS Interaction_score,
P_value
FROM `syntheticlethality.CellMap.CellMap`
WHERE (Genetic_interaction_score_____ < __SCORECUTOFF__ AND P_value < __PvalueCUTOFF__) AND
(UPPER(Array_allele_name) IN (SELECT YeastSymbol FROM INPUT_H2Y))
),
--- Union interaction tables
Union_ITX AS (
SELECT * FROM Yeast_ITX1
UNION ALL
SELECT * FROM Yeast_ITX2
)
--- Convert YeastSymbols to HumanSymbols in the protein-protein interactions
SELECT DISTINCT
GINFO1.EntrezID AS EntrezID_Input,
H2Y1.HumanSymbol AS Gene_Input,
--- Add if you want to know what yeast genes are involved
--- YITX.Interactor1 AS Gene_Input_Yeast,
GINFO2.EntrezID AS EntrezID_SL_Candidate,
H2Y2.HumanSymbol AS Gene_SL_Candidate,
--- Add if you want to know what yeast genes are involved
--- YITX.Interactor2 AS Gene_SL_Candidate_Yeast,
YITX.Interaction_score AS Interaction_score,
YITX.P_value AS P_value
FROM Union_ITX AS YITX
LEFT JOIN `syntheticlethality.gene_information.human2Yeast` AS H2Y1 ON YITX.Interactor1 = H2Y1.YeastSymbol
LEFT JOIN `syntheticlethality.gene_information.human2Yeast` AS H2Y2 ON YITX.Interactor2 = H2Y2.YeastSymbol
LEFT JOIN `syntheticlethality.gene_information.gene_info_human_HGNC` AS GINFO1 ON H2Y1.HumanID = GINFO1.HGNCID
LEFT JOIN `syntheticlethality.gene_information.gene_info_human_HGNC` AS GINFO2 ON H2Y2.HumanID = GINFO2.HGNCID
WHERE (H2Y1.HumanSymbol IS NOT NULL AND YITX.Interactor1 IS NOT NULL) AND
(H2Y2.HumanSymbol IS NOT NULL AND YITX.Interactor2 IS NOT NULL)
'''
# select the thresholds to be used
cutoff_algorithmMatchNo = "3"
cutoff_score = "-0.35"
cutoff_p = "0.01"
sql = sql.replace("__INPUTGENES__", inputGenes)
sql = sql.replace("__ALGORITHMCUTOFF__", cutoff_algorithmMatchNo)
sql = sql.replace("__SCORECUTOFF__", cutoff_score)
sql = sql.replace("__PvalueCUTOFF__", cutoff_p)
res = client.query(sql).to_dataframe()
###Output
_____no_output_____
###Markdown
Get Yeast SL Interactions
###Code
# show the SL partner genes for the input genes
res
###Output
_____no_output_____
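###Markdown
*Since more negative interaction scores lie further out in the synthetic lethal tail of the distribution, one simple way to prioritise the candidates is to sort the returned dataframe by score (plain pandas on the `res` dataframe above):*
###Code
# Rank candidate SL pairs: most negative interaction score (strongest evidence) first
res.sort_values(by='Interaction_score').head(10)
###Output
_____no_output_____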
###Markdown
Write to file & BigQuery table
###Code
res.to_csv(path_or_buf='conserved_SL_output.csv', index=False)
###Output
_____no_output_____
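###Markdown
*The heading above also mentions a BigQuery table; a minimal sketch of how the same dataframe could be uploaded with the client created earlier follows, where the destination table name is only a placeholder to be replaced with a dataset and table you have write access to:*
###Code
# Hypothetical destination table: replace with your own project.dataset.table
destination_table = 'your-project.your_dataset.conserved_SL_output'
load_job = client.load_table_from_dataframe(res, destination_table)
load_job.result()  # wait for the upload to finish
###Output
_____no_output_____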
###Markdown
Conservation-based Synthetic Lethal Pair Search ```Title: Conservation-based Synthetic Lethal Pair Search Authors: Taek-Kyun Kim Created: 02-07-2022 Purpose: Retrieve Synthetic Lethal Partners of Genes in a Given List Using Yeast Screen and Human-yeast Homology Information Notes: Runs in MyBinder ``` Introduction Rationale Use-cases:* Identify candidate synthetic lethal gene interactions for research prioritization based on prior evidence of interaction in yeast SL screens* _de novo_ discovery of SL interactions ApproachThis notebook re-implements the approach outlined in Srivas et al. (2016) Usage:Add genes of interest to the "inputGenes" variable, then run the next step. Workflow Overview Datasets Yeast Synthetic Lethal InteractionsCostanzo et al. (2016) Human to Yeast Ortholog MappingDetailed examples of the methodology used can be found in the accompanying notebook (Mapping human to yeast orthologs) References* Costanzo M, VanderSluis B, Koch EN, Baryshnikova A, Pons C, Tan G, Wang W, Usaj M, Hanchard J, Lee SD, Pelechano V, Styles EB, Billmann M, van Leeuwen J, van Dyk N, Lin ZY, Kuzmin E, Nelson J, Piotrowski JS, Srikumar T, Bahr S, Chen Y, Deshpande R, Kurat CF, Li SC, Li Z, Usaj MM, Okada H, Pascoe N, San Luis BJ, Sharifpoor S, Shuteriqi E, Simpkins SW, Snider J, Suresh HG, Tan Y, Zhu H, Malod-Dognin N, Janjic V, Przulj N, Troyanskaya OG, Stagljar I, Xia T, Ohya Y, Gingras AC, Raught B, Boutros M, Steinmetz LM, Moore CL, Rosebrock AP, Caudy AA, Myers CL, Andrews B, Boone C. **A global genetic interaction network maps a wiring diagram of cellular function.** Science. 2016 Sep 23;353(6306). pii: aaf1420. PubMed PMID: 27708008; PubMed Central PMCID: PMC5661885.* Srivas R, Shen JP, Yang CC, Sun SM, Li J, Gross AM, Jensen J, Licon K, Bojorquez-Gomez A, Klepper K, Huang J, Pekin D, Xu JL, Yeerna H, Sivaganesh V, Kollenstart L, van Attikum H, Aza-Blanc P, Sobol RW, Ideker T. **A Network of Conserved Synthetic Lethal Interactions for Exploration of Precision Cancer Therapy**. Mol Cell. 2016 Aug 4;63(3):514-25. doi:10.1016/j.molcel.2016.06.022.Epub 2016 Jul 21. PubMed PMID: 27453043; PubMed Central PMCID: PMC5209245. PreambleThis section describes how to set up the analysis environment, including Google Cloud Platform authentication and import of the necessary Python libraries. Setup Analysis Environment
###Code
# This code block installs the dependencies, please run it only once, the first time you run this notebook
!pip3 install google-cloud-bigquery
!pip3 install matplotlib
!pip3 install plotly
!pip3 install scipy
!pip3 install statsmodels
!pip3 install ipywidgets
# import modules
from google.cloud import bigquery
import sys
import matplotlib.pyplot as plt
import pandas as pd
import scipy
from scipy import stats
import numpy as np
import json
import statsmodels.stats.multitest as multi
import matplotlib.pyplot as plt
import math
import ipywidgets as widgets
import plotly
import plotly.express as px
import pyarrow
###Output
_____no_output_____
###Markdown
Google AuthenticationRunning the BigQuery cells in this notebook requires a Google Cloud Project, instructions for creating a project can be found in the [Google Documentation](https://cloud.google.com/resource-manager/docs/creating-managing-projectsconsole). The instance needs to be authorized to bill the project for queries.For more information on getting started in the cloud see ['Quick Start Guide to ISB-CGC'](https://isb-cancer-genomics-cloud.readthedocs.io/en/latest/sections/HowToGetStartedonISB-CGC.html) and alternative authentication methods can be found in the [Google Documentation](https://googleapis.dev/python/google-api-core/latest/auth.html).
###Code
# Users need to run the following command on their local machine or through the notebook.
# Make sure to install the google cloud SDK in the local environment. For more detailed instructions for gcloud installation, see support at https://cloud.google.com/sdk/docs/install
!gcloud auth application-default login
# Choose the project to be used for bigquery
project_id='syntheticlethality'
client = bigquery.Client(project_id) # uses the project_id defined above; replace it with your own project ID
%load_ext google.cloud.bigquery
# a list for input genes
inputGenes = ["DDX3X","DICER1","DROSHA","TNFRSF14","TRAF7","TSC1",'POLG',
"FBXO11","PRDM1","RFWD3","AMER1","LZTR1","ATP2B3"]
inputGenes = ["'"+x+"'" for x in inputGenes]
inputGenes = ','.join(inputGenes)
inputGenes
###Output
_____no_output_____
###Markdown
Map Yeast Orthologs & Get SL interactions To identify genetic interactions, the colony growth of double mutant strains is compared to that of single mutant strains. A genetic interaction in this dataset is defined by the growth of a double mutant colony being higher or lower than the expected growth predicted from the two corresponding single mutant colonies. These positive or negative genetic interactions are quantified by a fitness metric, or genetic interaction score.Synthetic lethal interactions are defined as genetic interactions with negative scores (< -0.35) at the extreme end of the distribution. Yeast genes are mapped to human genes using yeast-human orthology, and we presume that synthetic lethal pairs have a high likelihood of being maintained across species. The configurable parameters are listed as follows. Find the synthetic lethal partners of the genes in the given list.**Parameters****cutoff_algorithmMatchNo** is the desired minimum matching threshold required for a yeast-human gene comparison to be considered an ortholog.**cutoff_score** is the desired cutoff of the quantitative fitness metric. The default setting (< -0.35) corresponds to the left tail of the distribution.**cutoff_p** is the desired significance threshold; the value used here is p < 0.01.
###Code
sql = '''
WITH
--- Retrieve YeastSymbols mapped to HumanSymbols for the input genes
INPUT_H2Y AS (
SELECT YeastSymbol
FROM `isb-cgc-bq.annotations_versioned.Human2Yeast_mapping_Alliance_for_Genome_Resources_R3_0_1`
WHERE HumanSymbol IN (__INPUTGENES__) AND
AlgorithmsMatch >= __ALGORITHMCUTOFF__
),
--- Identify protein-protein interactions using the YeastSymbols (left match)
Yeast_ITX1 AS (
SELECT UPPER(Query_allele_name) AS Interactor1,
UPPER(Array_allele_name) AS Interactor2,
Genetic_interaction_score_____ AS Interaction_score,
P_value
FROM `isb-cgc-bq.supplementary_tables.Constanzo_etal_Science_2016_SGA_Genetic_Interactions`
WHERE (Genetic_interaction_score_____ < __SCORECUTOFF__ AND P_value < __PvalueCUTOFF__) AND
(UPPER(Query_allele_name) IN (SELECT YeastSymbol FROM INPUT_H2Y))
),
--- Identify protein-protein interactions using the YeastSymbols (right match)
Yeast_ITX2 AS (
SELECT UPPER(Array_allele_name) AS Interactor1,
UPPER(Query_allele_name) AS Interactor2,
Genetic_interaction_score_____ AS Interaction_score,
P_value
FROM `isb-cgc-bq.supplementary_tables.Constanzo_etal_Science_2016_SGA_Genetic_Interactions`
WHERE (Genetic_interaction_score_____ < __SCORECUTOFF__ AND P_value < __PvalueCUTOFF__) AND
(UPPER(Array_allele_name) IN (SELECT YeastSymbol FROM INPUT_H2Y))
),
--- Union interaction tables
Union_ITX AS (
SELECT * FROM Yeast_ITX1
UNION ALL
SELECT * FROM Yeast_ITX2
)
--- Convert YeastSymbols to HumanSymbols in the protein-protein interactions
SELECT DISTINCT
GINFO1.EntrezID AS EntrezID_Input,
H2Y1.HumanSymbol AS Gene_Input,
--- Add if you want to know what yeast genes are involved
--- YITX.Interactor1 AS Gene_Input_Yeast,
GINFO2.EntrezID AS EntrezID_SL_Candidate,
H2Y2.HumanSymbol AS Gene_SL_Candidate,
--- Add if you want to know what yeast genes are involved
--- YITX.Interactor2 AS Gene_SL_Candidate_Yeast,
YITX.Interaction_score AS Interaction_score,
YITX.P_value AS P_value
FROM Union_ITX AS YITX
LEFT JOIN `isb-cgc-bq.annotations_versioned.Human2Yeast_mapping_Alliance_for_Genome_Resources_R3_0_1` AS H2Y1 ON YITX.Interactor1 = H2Y1.YeastSymbol
LEFT JOIN `isb-cgc-bq.annotations_versioned.Human2Yeast_mapping_Alliance_for_Genome_Resources_R3_0_1` AS H2Y2 ON YITX.Interactor2 = H2Y2.YeastSymbol
LEFT JOIN `isb-cgc-bq.synthetic_lethality.gene_info_human_HGNC_NCBI_2020_07` AS GINFO1 ON H2Y1.HumanID = GINFO1.HGNCID
LEFT JOIN `isb-cgc-bq.synthetic_lethality.gene_info_human_HGNC_NCBI_2020_07` AS GINFO2 ON H2Y2.HumanID = GINFO2.HGNCID
WHERE (H2Y1.HumanSymbol IS NOT NULL AND YITX.Interactor1 IS NOT NULL) AND
(H2Y2.HumanSymbol IS NOT NULL AND YITX.Interactor2 IS NOT NULL)
'''
# select the thresholds to be used
cutoff_algorithmMatchNo = "3"
cutoff_score = "-0.35"
cutoff_p = "0.01"
sql = sql.replace("__INPUTGENES__", inputGenes)
sql = sql.replace("__ALGORITHMCUTOFF__", cutoff_algorithmMatchNo)
sql = sql.replace("__SCORECUTOFF__", cutoff_score)
sql = sql.replace("__PvalueCUTOFF__", cutoff_p)
res = client.query(sql).to_dataframe()
###Output
_____no_output_____
###Markdown
Get Yeast SL Interactions
###Code
# List the SL partner genes for the input genes
res
###Output
_____no_output_____
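###Markdown
*Before walking through the columns, here is a quick pandas summary of how many distinct candidate SL partners were found for each input gene:*
###Code
# Number of distinct candidate SL partners per input gene
res.groupby('Gene_Input')['Gene_SL_Candidate'].nunique().sort_values(ascending=False)
###Output
_____no_output_____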
###Markdown
**Gene_Input** is the input gene symbol. **EntrezID_Input** shows the Entrez IDs of the genes in the user's input gene list. **EntrezID_SL_Candidate and Gene_SL_Candidate** are the Entrez IDs and gene symbols for the inferred synthetic lethal partners. **Interaction_score and P_value** give the estimate of interaction strength between an input gene and its SL partner in the isb-cgc-bq.supplementary_tables.Constanzo_etal_Science_2016_SGA_Genetic_Interactions table. The results can be saved to a csv file.
###Code
res.to_csv(path_or_buf='conserved_SL_output.csv', index=False)
###Output
_____no_output_____ |
04-Milestone Project - 1/04-OPTIONAL -Milestone Project 1 - Advanced Solution.ipynb | ###Markdown
Tic Tac Toe - Advanced SolutionThis solution follows the same basic format as the Complete Walkthrough Solution, but takes advantage of some of the more advanced statements we have learned. Feel free to download the notebook to understand how it works!
###Code
# Specifically for the iPython Notebook environment for clearing output
from IPython.display import clear_output
import random
# Global variables
theBoard = [' '] * 10 # a list of empty spaces
available = [str(num) for num in range(0,10)] # a List Comprehension
players = [0,'X','O'] # note that players[1] == 'X' and players[-1] == 'O'
def display_board(a,b):
print('Available TIC-TAC-TOE\n'+
' moves\n\n '+
a[7]+'|'+a[8]+'|'+a[9]+' '+b[7]+'|'+b[8]+'|'+b[9]+'\n '+
'----- -----\n '+
a[4]+'|'+a[5]+'|'+a[6]+' '+b[4]+'|'+b[5]+'|'+b[6]+'\n '+
'----- -----\n '+
a[1]+'|'+a[2]+'|'+a[3]+' '+b[1]+'|'+b[2]+'|'+b[3]+'\n')
display_board(available,theBoard)
def display_board(a,b):
print(f'Available TIC-TAC-TOE\n moves\n\n {a[7]}|{a[8]}|{a[9]} {b[7]}|{b[8]}|{b[9]}\n ----- -----\n {a[4]}|{a[5]}|{a[6]} {b[4]}|{b[5]}|{b[6]}\n ----- -----\n {a[1]}|{a[2]}|{a[3]} {b[1]}|{b[2]}|{b[3]}\n')
display_board(available,theBoard)
def place_marker(avail,board,marker,position):
board[position] = marker
avail[position] = ' '
def win_check(board,mark):
return ((board[7] == board[8] == board[9] == mark) or # across the top
(board[4] == board[5] == board[6] == mark) or # across the middle
(board[1] == board[2] == board[3] == mark) or # across the bottom
            (board[7] == board[4] == board[1] == mark) or # down the left side
(board[8] == board[5] == board[2] == mark) or # down the middle
(board[9] == board[6] == board[3] == mark) or # down the right side
(board[7] == board[5] == board[3] == mark) or # diagonal
(board[9] == board[5] == board[1] == mark)) # diagonal
def random_player():
return random.choice((-1, 1))
def space_check(board,position):
return board[position] == ' '
def full_board_check(board):
return ' ' not in board[1:]
def player_choice(board,player):
position = 0
while position not in [1,2,3,4,5,6,7,8,9] or not space_check(board, position):
try:
position = int(input('Player %s, choose your next position: (1-9) '%(player)))
except:
print("I'm sorry, please try again.")
return position
def replay():
return input('Do you want to play again? Enter Yes or No: ').lower().startswith('y')
while True:
clear_output()
print('Welcome to Tic Tac Toe!')
toggle = random_player()
player = players[toggle]
print('For this round, Player %s will go first!' %(player))
game_on = True
input('Hit Enter to continue')
while game_on:
display_board(available,theBoard)
position = player_choice(theBoard,player)
place_marker(available,theBoard,player,position)
if win_check(theBoard, player):
display_board(available,theBoard)
print('Congratulations! Player '+player+' wins!')
game_on = False
else:
if full_board_check(theBoard):
display_board(available,theBoard)
print('The game is a draw!')
break
else:
toggle *= -1
player = players[toggle]
clear_output()
# reset the board and available moves list
theBoard = [' '] * 10
available = [str(num) for num in range(0,10)]
if not replay():
break
###Output
Welcome to Tic Tac Toe!
For this round, Player X will go first!
temas/I.computo_cientifico/1.6.Perfilamiento_Python.ipynb | ###Markdown
**Notes for the docker container:** Docker command to run this notebook locally: note: change `` to the directory path you want to map to `/datos` inside the docker container.```docker run --rm -v :/datos --name jupyterlab_numerical -p 8888:8888 -p 8786:8786 -p 8787:8787 -d palmoreck/jupyterlab_numerical:1.1.0```password for jupyterlab: `qwerty`To stop the docker container:```docker stop jupyterlab_numerical``` Documentation for the docker image `palmoreck/jupyterlab_numerical:1.1.0` is at this [link](https://github.com/palmoreck/dockerfiles/tree/master/jupyterlab/numerical). --- This notebook uses methods covered in [1.5.Integracion_numerica](https://github.com/ITAM-DS/analisis-numerico-computo-cientifico/blob/master/temas/I.computo_cientifico/1.5.Integracion_numerica.ipynb) We install the tools that will help us with profiling:
###Code
%pip install -q --user line_profiler
%pip install -q --user memory_profiler
%pip install -q --user psutil
%pip install -q --user guppy3
###Output
WARNING: You are using pip version 19.3.1; however, version 20.0.2 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
Note: you may need to restart the kernel to use updated packages.
###Markdown
The next cell will restart the **IPython** kernel so that the packages installed in the previous cell are loaded. Click **Ok** on the message that appears and continue with the rest of the notebook.
###Code
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
import math
from scipy.integrate import quad
###Output
_____no_output_____
###Markdown
Profiling in Python In this notebook we review some Python tools for profiling code: CPU and memory usage.Measuring execution time with:* Python's [time](https://docs.python.org/3/library/time.htmltime.time) module.* The [%time](https://ipython.readthedocs.io/en/stable/interactive/magics.htmlmagic-time) magic command <- this tool only measures the time of a single statement and is included here just for reference; it will not be used in this notebook.* [/usr/bin/time](https://en.wikipedia.org/wiki/Time_(Unix)) from `Unix`.* The [%timeit](https://ipython.readthedocs.io/en/stable/interactive/magics.htmlmagic-timeit) magic command.Profiling:* CPU with: [line_profiler](https://pypi.org/project/line-profiler/) and [CProfile](https://docs.python.org/2/library/profile.html), which is `built-in` in Python's *standard library*.* Memory with: [memory_profiler](https://pypi.org/project/memory-profiler/) and [heapy](https://pypi.org/project/guppy/). Measuring execution time The first approach we use in this notebook to profile our code is to identify what is slow; other measurements are the amount of RAM and disk or network I/O. 1) Using `time`
###Code
import time
###Output
_____no_output_____
###Markdown
Composite rectangle rule **Example implementation of the composite rectangle rule: using math** Use the composite rectangle rule to approximate the integral $\int_0^1e^{-x^2}dx$ with $10^6$ subintervals.
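For reference (the formula is implied by the note but not written out), the composite rectangle (mid-point) rule on $[a,b]$ with $n$ subintervals is $$\int_a^bf(x)dx \approx \hat{h}\sum_{i=0}^{n-1}f\left(a+\left(i+\frac{1}{2}\right)\hat{h}\right), \qquad \hat{h}=\frac{b-a}{n},$$ which is exactly the node formula implemented in the functions below.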
###Code
f=lambda x: math.exp(-x**2) #using math library
def Rcf(f,a,b,n): #Rcf: composite rectangle rule for f
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf (float)
"""
h_hat=(b-a)/n
nodes=[a+(i+1/2)*h_hat for i in range(0,n)]
sum_res=0
for node in nodes:
sum_res=sum_res+f(node)
return h_hat*sum_res
n=10**6
start_time = time.time()
aprox=Rcf(f,0,1,n)
end_time = time.time()
secs = end_time-start_time
print("Rcf tomรณ",secs,"segundos" )
###Output
Rcf took 0.3661477565765381 seconds
###Markdown
**Obs:** remember that we must check that the problem is actually being solved correctly. In this case the relative error helps us
###Code
def err_relativo(aprox, obj):
    return math.fabs(aprox-obj)/math.fabs(obj) #note the use of the math library
obj, err = quad(f, 0, 1)
err_relativo(aprox,obj)
###Output
_____no_output_____
###Markdown
**Comments:*** Keep in mind that when measuring execution times there is always variation in the measurements. Such variation is normal.* Consider that the machine on which the tests are run may be performing other tasks while the code executes, for example network, disk or RAM access. These are factors that can cause variation in the program's execution time.* If timing reports are going to be produced, it is important to state the characteristics of the machine on which the tests are run, e.g.: Dell E6420 with an Intel Core I7-2720QM processor (2.20 GHz, 6 MB cache, Quad Core) and 8 GB of RAM on Ubuntu $13.10$. 2) Using `/usr/bin/time` from Unix For the `/usr/bin/time` command line tool we first write the following file to the directory where this notebook lives, using the `%file` magic command
###Code
%%file Rcf.py
import math
def Rcf(f,a,b,n): #Rcf: composite rectangle rule for f
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf (float)
"""
h_hat=(b-a)/n
nodes=[a+(i+1/2)*h_hat for i in range(0,n)]
sum_res=0
for node in nodes:
sum_res=sum_res+f(node)
return h_hat*sum_res
if __name__=="__main__": #aรฑadimos este bloque para ejecuciรณn de la funciรณn Rcf
n=10**6
f=lambda x: math.exp(-x**2)
print("aproximaciรณn: {:0.6e}".format(Rcf(f,0,1,n)))
###Output
Writing Rcf.py
###Markdown
See [link](https://stackoverflow.com/questions/419163/what-does-if-name-main-do) and [link2](https://es.stackoverflow.com/questions/32165/qu%C3%A9-es-if-name-main) to learn what the line `if __name__ == "__main__":` does. The following is only needed if you do not have the `/usr/bin/time` command installed:
###Code
%%bash
sudo apt-get install time
%%bash
/usr/bin/time -p python3 Rcf.py #the -p flag is for portability,
                                #see: http://manpages.ubuntu.com/manpages/xenial/man1/time.1.html
                                #for more information
###Output
approximation: 7.468241e-01
###Markdown
**Comments:*** `real` measures the wall clock or elapsed time.* `user` measures the amount of time of your run that the CPU spent on functions not related to the system's kernel*.* `sys` measures the amount of time of your run that the CPU spent on functions at the system kernel level.\*See [kernel operating system](https://en.wikipedia.org/wiki/Kernel_(operating_system)) for a definition of a machine's kernel.**Obs:** One function related to the system kernel is memory allocation when a variable is created. Others are I/O-related instructions such as reading from memory, disk or network.* The advantage of `/usr/bin/time` is that it is not specific to Python.* This command includes the time the system takes to start the python executable (which can be significant if many processes are started vs a single process). If you have short-running scripts where the startup time is a significant fraction of the total time, then `/usr/bin/time` can be a useful measure.**Note:** Adding `user` and `sys` gives an idea of how much time was spent on the CPU, and the difference between this result and `real` gives an idea of how much time was spent on I/O, or of the amount of time the system spent running other tasks (see the brief aside below). * The `verbose` flag can be used to get more information:
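Before running it, an aside not in the original note: the `real`/`user`/`sys` split can also be observed from inside Python by comparing wall-clock time with the CPU time reported by the standard `resource` module (Unix only). A minimal sketch, reusing `Rcf`, `f` and `n` defined above:
```
# wall-clock vs CPU time for one call to Rcf (rough analogue of real/user/sys)
import resource
import time

start_wall = time.perf_counter()
start_ru = resource.getrusage(resource.RUSAGE_SELF)
aprox = Rcf(f, 0, 1, n)
end_wall = time.perf_counter()
end_ru = resource.getrusage(resource.RUSAGE_SELF)

print("real (wall):", end_wall - start_wall)
print("user (CPU):", end_ru.ru_utime - start_ru.ru_utime)
print("sys (CPU):", end_ru.ru_stime - start_ru.ru_stime)
```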
###Code
%%bash
/usr/bin/time --verbose python3 Rcf.py
###Output
approximation: 7.468241e-01
###Markdown
and a (brief) explanation of the output can be found [here](http://manpages.ubuntu.com/manpages/xenial/man1/time.1.html). For `Major (requiring I/O)` we want the value to be $0$, since a nonzero value indicates that the operating system had to load pages of data from disk because that data no longer resided in RAM (for some reason). 3) Using `%timeit` The `timeit` module is another way to measure execution time on the CPU.**Note:** the `timeit` module temporarily disables Python's garbage collector* (that is, unused Python objects will not be deallocated from memory). If the garbage collector would be invoked by your operations in a real-world example, this can be one reason for differences you may get in the time measurements. *I suggest looking up what the garbage collector is in blogs, for example: [link](https://rushter.com/blog/python-garbage-collector/) or [link2](https://stackify.com/python-garbage-collection/) or [link3](https://stackoverflow.com/questions/4484167/python-garbage-collector-documentation).
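An aside, not in the original note: the same measurement can be made with the `timeit` module directly (useful outside IPython). A minimal sketch that mirrors `-n 5 -r 10` and keeps the garbage collector enabled via the setup string:
```
import timeit

# 10 repetitions, each timing a loop of 5 calls to Rcf; report seconds per call
times = timeit.repeat(stmt="Rcf(f, 0, 1, n)",
                      setup="import gc; gc.enable()",  # optional: keep the GC on during timing
                      repeat=10, number=5,
                      globals=globals())
print([t / 5 for t in times])
```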
###Code
%timeit?
%timeit -n 5 -r 10 Rcf(f,0,1,n)
###Output
333 ms ± 11.1 ms per loop (mean ± std. dev. of 10 runs, 5 loops each)
###Markdown
in this case the `Rcf` function is run in a loop of size $5$; the time per loop is the total over the $5$ executions divided by $5$, this is repeated $10$ times, and the mean and standard deviation over the $10$ runs are reported. $ms$ is milliseconds, $\mu s$ is microseconds and $ns$ is nanoseconds. **Comments:*** `timeit` is recommended for small sections of code. For larger sections, modifying the value of $n$ (running the code n times in a loop) typically results in different measurements.* Run `timeit` several times to make sure you obtain similar timings. If you observe a large variation in the time measurements between different `timeit` repetitions, perform more repetitions until you get a stable result. Measuring CPU usage 1) Using cProfile `cProfile` is a **built-in** tool in the standard library for profiling. It is used with the `CPython` implementation of `Python` (see [link](https://stackoverflow.com/questions/17130975/python-vs-cpython) for an explanation of Python implementations) to measure the execution time of each function in the program. It is run from the command line or with a magic command. The `-s` flag indicates that the result should be sorted by the cumulative time spent inside each function. The following `cProfile` output shows:* The total execution time, which includes the time of the block of code we are measuring plus the overhead of using `cProfile`. For this reason the execution time is larger than with the previous time measurements.* The `ncalls` column which, as the name indicates, shows the number of times each function was called. In this case the `lambda` and `math.exp` functions are the ones called the most: $n=10^6$ times. The `tottime` column shows the time these functions took to execute (without calling other functions).* The `percall` column is the quotient of `tottime` and `ncalls`.* The `cumtime` column contains the time spent in the function and in the functions it calls. For example the `Rcf` function calls `listcomp`, so it is natural for `Rcf` to appear higher up in the sorted `cProfile` output. This also happens with `lambda` and `math.exp`, since the former calls the latter.* The second `percall` column is the quotient of the `cumtime` column and the number of primitive calls.* The last column gives information about the function and the line where it is located in the code. For example line $1$ of the module is the call to the `__main__` function. Line $2$ is the call to the `Rcf` function. So the call to `__main__` is practically negligible.
###Code
%%bash
python3 -m cProfile -s cumulative Rcf.py
###Output
approximation: 7.468241e-01
2000068 function calls in 0.649 seconds
Ordered by: cumulative time
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.000 0.000 0.649 0.649 {built-in method builtins.exec}
1 0.021 0.021 0.649 0.649 Rcf.py:1(<module>)
1 0.155 0.155 0.627 0.627 Rcf.py:2(Rcf)
1000000 0.252 0.000 0.343 0.000 Rcf.py:23(<lambda>)
1 0.129 0.129 0.129 0.129 Rcf.py:16(<listcomp>)
1000000 0.090 0.000 0.090 0.000 {built-in method math.exp}
1 0.000 0.000 0.001 0.001 <frozen importlib._bootstrap>:966(_find_and_load)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec)
2 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:728(create_module)
1 0.000 0.000 0.000 0.000 {built-in method _imp.create_builtin}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:58(__init__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec)
1 0.000 0.000 0.000 0.000 {method 'format' of 'str' objects}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:151(__exit__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:103(release)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec)
1 0.000 0.000 0.000 0.000 {built-in method builtins.print}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader)
2 0.000 0.000 0.000 0.000 {built-in method _thread.allocate_lock}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:847(__exit__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:736(exec_module)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:78(acquire)
2 0.000 0.000 0.000 0.000 {built-in method _thread.get_ident}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:232(_requires_builtin_wrapper)
1 0.000 0.000 0.000 0.000 {built-in method builtins.any}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:176(cb)
4 0.000 0.000 0.000 0.000 {built-in method builtins.getattr}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:311(__enter__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:369(__init__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:222(_verbose_message)
2 0.000 0.000 0.000 0.000 {method 'get' of 'dict' objects}
2 0.000 0.000 0.000 0.000 {method 'rpartition' of 'str' objects}
1 0.000 0.000 0.000 0.000 {built-in method _imp.is_builtin}
4 0.000 0.000 0.000 0.000 {built-in method builtins.hasattr}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:307(__init__)
4 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:321(<genexpr>)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:416(parent)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:843(__enter__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:143(__init__)
3 0.000 0.000 0.000 0.000 {built-in method _imp.acquire_lock}
3 0.000 0.000 0.000 0.000 {built-in method _imp.release_lock}
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
1 0.000 0.000 0.000 0.000 {built-in method _imp.exec_builtin}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:424(has_location)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:753(is_package)
###Markdown
**Note:** Remember that the `CProfile` output with the `-s cumulative` flag is sorted by the time spent in the functions called inside the analyzed block of code. It is not sorted by parent functions. To get an output that shows which functions call which others, the following can be used:
###Code
%%bash
python3 -m cProfile -o profile.stats Rcf.py
import pstats
p = pstats.Stats("profile.stats")
p.sort_stats("cumulative")
p.print_stats()
p.print_callers()
###Output
Ordered by: cumulative time
Function was called by...
ncalls tottime cumtime
{built-in method builtins.exec} <-
Rcf.py:1(<module>) <- 1 0.014 0.587 {built-in method builtins.exec}
Rcf.py:2(Rcf) <- 1 0.131 0.573 Rcf.py:1(<module>)
Rcf.py:23(<lambda>) <- 1000000 0.218 0.300 Rcf.py:2(Rcf)
Rcf.py:16(<listcomp>) <- 1 0.142 0.142 Rcf.py:2(Rcf)
{built-in method math.exp} <- 1000000 0.082 0.082 Rcf.py:23(<lambda>)
<frozen importlib._bootstrap>:966(_find_and_load) <- 1 0.000 0.001 Rcf.py:1(<module>)
<frozen importlib._bootstrap>:936(_find_and_load_unlocked) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load)
<frozen importlib._bootstrap>:651(_load_unlocked) <- 1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked)
<frozen importlib._bootstrap>:564(module_from_spec) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
<frozen importlib._bootstrap>:728(create_module) <- 1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec)
<frozen importlib._bootstrap>:211(_call_with_frames_removed) <- 1 0.000 0.000 <frozen importlib._bootstrap>:728(create_module)
1 0.000 0.000 <frozen importlib._bootstrap>:736(exec_module)
{built-in method _imp.create_builtin} <- 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed)
<frozen importlib._bootstrap>:147(__enter__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load)
<frozen importlib._bootstrap>:870(_find_spec) <- 1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked)
<frozen importlib._bootstrap>:157(_get_module_lock) <- 1 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__)
<frozen importlib._bootstrap>:707(find_spec) <- 1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec)
{method 'format' of 'str' objects} <- 1 0.000 0.000 Rcf.py:1(<module>)
<frozen importlib._bootstrap>:433(spec_from_loader) <- 1 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec)
<frozen importlib._bootstrap>:58(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock)
<frozen importlib._bootstrap>:504(_init_module_attrs) <- 1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec)
{built-in method builtins.print} <- 1 0.000 0.000 Rcf.py:1(<module>)
<frozen importlib._bootstrap>:318(__exit__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
<frozen importlib._bootstrap>:151(__exit__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load)
<frozen importlib._bootstrap>:78(acquire) <- 1 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__)
<frozen importlib._bootstrap>:103(release) <- 1 0.000 0.000 <frozen importlib._bootstrap>:151(__exit__)
<frozen importlib._bootstrap>:232(_requires_builtin_wrapper) <- 1 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader)
{built-in method builtins.hasattr} <- 2 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader)
1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec)
1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
{built-in method builtins.getattr} <- 4 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs)
{built-in method builtins.any} <- 1 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__)
<frozen importlib._bootstrap>:369(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader)
<frozen importlib._bootstrap>:416(parent) <- 1 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs)
<frozen importlib._bootstrap>:736(exec_module) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
<frozen importlib._bootstrap>:176(cb) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load)
{built-in method _imp.is_builtin} <- 1 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec)
<frozen importlib._bootstrap>:311(__enter__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
<frozen importlib._bootstrap>:143(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load)
<frozen importlib._bootstrap>:222(_verbose_message) <- 1 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__)
{method 'get' of 'dict' objects} <- 1 0.000 0.000 <frozen importlib._bootstrap>:176(cb)
1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load)
<frozen importlib._bootstrap>:307(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
<frozen importlib._bootstrap>:321(<genexpr>) <- 4 0.000 0.000 {built-in method builtins.any}
<frozen importlib._bootstrap>:847(__exit__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec)
{method 'rpartition' of 'str' objects} <- 1 0.000 0.000 <frozen importlib._bootstrap>:416(parent)
1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked)
{built-in method _thread.allocate_lock} <- 2 0.000 0.000 <frozen importlib._bootstrap>:58(__init__)
{built-in method _thread.get_ident} <- 1 0.000 0.000 <frozen importlib._bootstrap>:78(acquire)
1 0.000 0.000 <frozen importlib._bootstrap>:103(release)
{built-in method _imp.acquire_lock} <- 1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock)
1 0.000 0.000 <frozen importlib._bootstrap>:176(cb)
1 0.000 0.000 <frozen importlib._bootstrap>:843(__enter__)
{built-in method _imp.release_lock} <- 1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock)
1 0.000 0.000 <frozen importlib._bootstrap>:176(cb)
1 0.000 0.000 <frozen importlib._bootstrap>:847(__exit__)
{built-in method _imp.exec_builtin} <- 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed)
<frozen importlib._bootstrap>:843(__enter__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec)
{method 'disable' of '_lsprof.Profiler' objects} <-
<frozen importlib._bootstrap>:424(has_location) <- 1 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs)
<frozen importlib._bootstrap>:753(is_package) <- 1 0.000 0.000 <frozen importlib._bootstrap>:232(_requires_builtin_wrapper)
###Markdown
and we can also get the information about which functions each function called
###Code
p.print_callees()
###Output
Ordered by: cumulative time
Function called...
ncalls tottime cumtime
{built-in method builtins.exec} -> 1 0.014 0.587 Rcf.py:1(<module>)
Rcf.py:1(<module>) -> 1 0.000 0.001 <frozen importlib._bootstrap>:966(_find_and_load)
1 0.131 0.573 Rcf.py:2(Rcf)
1 0.000 0.000 {built-in method builtins.print}
1 0.000 0.000 {method 'format' of 'str' objects}
Rcf.py:2(Rcf) -> 1 0.142 0.142 Rcf.py:16(<listcomp>)
1000000 0.218 0.300 Rcf.py:23(<lambda>)
Rcf.py:23(<lambda>) -> 1000000 0.082 0.082 {built-in method math.exp}
Rcf.py:16(<listcomp>) ->
{built-in method math.exp} ->
<frozen importlib._bootstrap>:966(_find_and_load) -> 1 0.000 0.000 <frozen importlib._bootstrap>:143(__init__)
1 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__)
1 0.000 0.000 <frozen importlib._bootstrap>:151(__exit__)
1 0.000 0.000 <frozen importlib._bootstrap>:176(cb)
1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked)
1 0.000 0.000 {method 'get' of 'dict' objects}
<frozen importlib._bootstrap>:936(_find_and_load_unlocked) -> 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec)
1 0.000 0.000 {method 'rpartition' of 'str' objects}
<frozen importlib._bootstrap>:651(_load_unlocked) -> 1 0.000 0.000 <frozen importlib._bootstrap>:307(__init__)
1 0.000 0.000 <frozen importlib._bootstrap>:311(__enter__)
1 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__)
1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec)
1 0.000 0.000 <frozen importlib._bootstrap>:736(exec_module)
1 0.000 0.000 {built-in method builtins.hasattr}
<frozen importlib._bootstrap>:564(module_from_spec) -> 1 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs)
1 0.000 0.000 <frozen importlib._bootstrap>:728(create_module)
1 0.000 0.000 {built-in method builtins.hasattr}
<frozen importlib._bootstrap>:728(create_module) -> 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed)
<frozen importlib._bootstrap>:211(_call_with_frames_removed) -> 1 0.000 0.000 {built-in method _imp.create_builtin}
1 0.000 0.000 {built-in method _imp.exec_builtin}
{built-in method _imp.create_builtin} ->
<frozen importlib._bootstrap>:147(__enter__) -> 1 0.000 0.000 <frozen importlib._bootstrap>:78(acquire)
1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock)
<frozen importlib._bootstrap>:870(_find_spec) -> 1 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec)
1 0.000 0.000 <frozen importlib._bootstrap>:843(__enter__)
1 0.000 0.000 <frozen importlib._bootstrap>:847(__exit__)
<frozen importlib._bootstrap>:157(_get_module_lock) -> 1 0.000 0.000 <frozen importlib._bootstrap>:58(__init__)
1 0.000 0.000 {built-in method _imp.acquire_lock}
1 0.000 0.000 {built-in method _imp.release_lock}
<frozen importlib._bootstrap>:707(find_spec) -> 1 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader)
1 0.000 0.000 {built-in method _imp.is_builtin}
{method 'format' of 'str' objects} ->
<frozen importlib._bootstrap>:433(spec_from_loader) -> 1 0.000 0.000 <frozen importlib._bootstrap>:232(_requires_builtin_wrapper)
1 0.000 0.000 <frozen importlib._bootstrap>:369(__init__)
2 0.000 0.000 {built-in method builtins.hasattr}
<frozen importlib._bootstrap>:58(__init__) -> 2 0.000 0.000 {built-in method _thread.allocate_lock}
<frozen importlib._bootstrap>:504(_init_module_attrs) -> 1 0.000 0.000 <frozen importlib._bootstrap>:416(parent)
1 0.000 0.000 <frozen importlib._bootstrap>:424(has_location)
4 0.000 0.000 {built-in method builtins.getattr}
{built-in method builtins.print} ->
<frozen importlib._bootstrap>:318(__exit__) -> 1 0.000 0.000 <frozen importlib._bootstrap>:222(_verbose_message)
1 0.000 0.000 {built-in method builtins.any}
<frozen importlib._bootstrap>:151(__exit__) -> 1 0.000 0.000 <frozen importlib._bootstrap>:103(release)
<frozen importlib._bootstrap>:78(acquire) -> 1 0.000 0.000 {built-in method _thread.get_ident}
<frozen importlib._bootstrap>:103(release) -> 1 0.000 0.000 {built-in method _thread.get_ident}
<frozen importlib._bootstrap>:232(_requires_builtin_wrapper) -> 1 0.000 0.000 <frozen importlib._bootstrap>:753(is_package)
{built-in method builtins.hasattr} ->
{built-in method builtins.getattr} ->
{built-in method builtins.any} -> 4 0.000 0.000 <frozen importlib._bootstrap>:321(<genexpr>)
<frozen importlib._bootstrap>:369(__init__) ->
<frozen importlib._bootstrap>:416(parent) -> 1 0.000 0.000 {method 'rpartition' of 'str' objects}
<frozen importlib._bootstrap>:736(exec_module) -> 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed)
<frozen importlib._bootstrap>:176(cb) -> 1 0.000 0.000 {built-in method _imp.acquire_lock}
1 0.000 0.000 {built-in method _imp.release_lock}
1 0.000 0.000 {method 'get' of 'dict' objects}
{built-in method _imp.is_builtin} ->
<frozen importlib._bootstrap>:311(__enter__) ->
<frozen importlib._bootstrap>:143(__init__) ->
<frozen importlib._bootstrap>:222(_verbose_message) ->
{method 'get' of 'dict' objects} ->
<frozen importlib._bootstrap>:307(__init__) ->
<frozen importlib._bootstrap>:321(<genexpr>) ->
<frozen importlib._bootstrap>:847(__exit__) -> 1 0.000 0.000 {built-in method _imp.release_lock}
{method 'rpartition' of 'str' objects} ->
{built-in method _thread.allocate_lock} ->
{built-in method _thread.get_ident} ->
{built-in method _imp.acquire_lock} ->
{built-in method _imp.release_lock} ->
{built-in method _imp.exec_builtin} ->
<frozen importlib._bootstrap>:843(__enter__) -> 1 0.000 0.000 {built-in method _imp.acquire_lock}
{method 'disable' of '_lsprof.Profiler' objects} ->
<frozen importlib._bootstrap>:424(has_location) ->
<frozen importlib._bootstrap>:753(is_package) ->
###Markdown
The magic command is `%prun`:
###Code
%prun -s cumulative Rcf(f,0,1,n)
###Output
###Markdown
2) Using line_profiler `line_profiler` works by profiling individual functions line by line. The idea is to first profile the program with `CProfile` to identify the functions that take the largest share of the execution time, and then profile those with `line_profiler`. **Comment:** a good practice is to save the different versions of your code as you modify it, so that you keep a record of your changes. It can be run from the command line or loaded in IPython with the `load_ext` magic command:
###Code
%load_ext line_profiler
%lprun?
###Output
_____no_output_____
###Markdown
In the following output:* The `%Time` column contains the percentage of time spent. In the profiled case, the line `sum_res=sum_res+f(node)` is where the largest percentage of the time is spent, followed by the `for` line and by the line where a [list comprehension](https://docs.python.org/3/tutorial/datastructures.htmllist-comprehensions) is used to create the numerical integration nodes.
###Code
%lprun -f Rcf Rcf(f,0,1,n)
###Output
_____no_output_____
###Markdown
With the evidence generated by `line_profiler`, could we write a faster function? The first thing we can do is use a [generator](https://wiki.python.org/moin/Generators) instead of a list:
###Code
def Rcf2(f,a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
    Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf2 (float)
"""
h_hat=(b-a)/n
nodes=(a+(i+1/2)*h_hat for i in range(0,n))
sum_res=0
for node in nodes:
sum_res=sum_res+f(node)
return h_hat*sum_res
###Output
_____no_output_____
###Markdown
measure with `%timeit`:
###Code
%timeit -n 5 -r 10 Rcf2(f,0,1,n)
aprox=Rcf2(f,0,1,n)
###Output
_____no_output_____
###Markdown
check that this new implementation is correct:
###Code
err_relativo(aprox,obj)
###Output
_____no_output_____
###Markdown
profile it with `line_profiler`:
###Code
%lprun -f Rcf2 Rcf2(f,0,1,n)
###Output
_____no_output_____
###Markdown
and observe that the line where the list used to be created now takes a negligible percentage of the time. We can write an implementation that deals with the time spent in the `for` line:
###Code
def Rcf3(f,a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
    Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf3 (float)
"""
h_hat=(b-a)/n
nodes=(a+(i+1/2)*h_hat for i in range(0,n))
suma_res = sum((f(node) for node in nodes))
return h_hat*suma_res
###Output
_____no_output_____
###Markdown
measure with `%timeit`:
###Code
%timeit -n 5 -r 10 Rcf3(f,0,1,n)
###Output
363 ms ± 4.99 ms per loop (mean ± std. dev. of 10 runs, 5 loops each)
###Markdown
check that this new implementation is correct:
###Code
aprox=Rcf3(f,0,1,n)
err_relativo(aprox,obj)
###Output
_____no_output_____
###Markdown
profile it with `line_profiler`:
###Code
%lprun -f Rcf3 Rcf3(f,0,1,n)
###Output
_____no_output_____
###Markdown
and most of the time percentage is now in a single line. Remember that the `Cprofile` result indicated that the `lambda` function and `math.exp` are called $n=10^6$ times. An implementation of the rectangle rule with fewer function calls (and therefore less time) would be:
###Code
def Rcf4(a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n-1
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf4 (float)
"""
h_hat=(b-a)/n
nodes=(a+(i+1/2)*h_hat for i in range(0,n))
suma_res = sum(((math.exp(-node**2) for node in nodes)))
return h_hat*suma_res
%lprun -f Rcf4 Rcf4(0,1,n)
%timeit -n 5 -r 10 Rcf4(0,1,n)
aprox=Rcf4(0,1,n)
err_relativo(aprox,obj)
###Output
_____no_output_____
###Markdown
Although this implementation is the fastest so far, it is not as flexible, since it computes the rectangle rule for a function defined inside the function itself. If we wanted to compute the rule for another function we would have to modify `Rcf4` directly, which is not flexible. Even though `Rcf4` is faster, we prefer `Rcf3` for its flexibility and lower resource usage (which will be seen with `memory_profiler` further below).
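An aside, not in the original note: a hedged sketch of a variant (hypothetical names `nodes_midpoint` and `Rcf3_map`) that keeps the integrand as a parameter, separates building the nodes from transforming them, and trims one generator-expression layer by using `map`:
```
def nodes_midpoint(a, b, n):
    """Generate the mid-point nodes a+(i+1/2)*h_hat for i=0,...,n-1."""
    h_hat = (b - a) / n
    return (a + (i + 1 / 2) * h_hat for i in range(0, n))

def Rcf3_map(f, a, b, n):
    """Composite rectangle rule; f stays a parameter, sum(map(...)) replaces the extra generator expression."""
    h_hat = (b - a) / n
    return h_hat * sum(map(f, nodes_midpoint(a, b, n)))

# sanity check against the reference value computed above
err_relativo(Rcf3_map(f, 0, 1, n), obj)
```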
###Code
def Rcf5(a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n-1
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
        Rcf5 (float)
"""
h_hat=(b-a)/n
f_nodes=(math.exp(-(a+(i+1/2)*h_hat)**2) for i in range(0,n))
suma_res = sum(f_nodes)
return h_hat*suma_res
%lprun -f Rcf5 Rcf5(0,1,n)
%timeit -n 5 -r 10 Rcf5(0,1,n)
aprox=Rcf5(0,1,n)
err_relativo(aprox,obj)
###Output
_____no_output_____
###Markdown
Note that in `Rcf5` the nodes are built and transformed with `math.exp` in a single line. Although this implementation is the fastest so far, we do not suggest using it, since it lacks flexibility just like `Rcf4`, and building data and transforming it in one line is not advisable. Combining operations in a single line results in code that is hard to read. It is better to separate these two tasks into two functions so that, if one fails, we know which one failed and why. **Example of running line_profiler from the command line:**
###Code
%%file Rcf4.py
import math
@profile #this line is required to indicate that the following function
         #is to be profiled with line_profiler
def Rcf4(a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n
Mid point is calculated via formula: x_{i-1}+(x_i-x_{i-1})/2 for i=1,...,n-1 to avoid rounding errors
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf4 (float)
"""
h_hat=(b-a)/n
nodes=(a+(i+1/2)*h_hat for i in range(0,n))
suma_res = sum(((math.exp(-node**2) for node in nodes)))
return h_hat*suma_res
if __name__ == "__main__":
n=10**6
print("aproximaciรณn: {:0.6e}".format(Rcf4(0,1,n)))
%%bash
$HOME/.local/bin/kernprof -l -v Rcf4.py
###Output
approximation: 7.468241e-01
Wrote profile results to Rcf4.py.lprof
Timer unit: 1e-06 s
Total time: 0.77406 s
File: Rcf4.py
Function: Rcf4 at line 2
Line # Hits Time Per Hit % Time Line Contents
==============================================================
     2                                           @profile #this line is required to indicate that the following function
     3                                           #is to be profiled with line_profiler
4 def Rcf4(a,b,n):
5 """
6 Compute numerical approximation using rectangle or mid-point method in
7 an interval.
8 Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n
9 Mid point is calculated via formula: x_{i-1}+(x_i-x_{i-1})/2 for i=1,...,n-1 to avoid rounding errors
10 Args:
11 f (lambda expression): lambda expression of integrand
12 a (int): left point of interval
13 b (int): right point of interval
14 n (int): number of subintervals
15 Returns:
16 Rcf4 (float)
17 """
18 1 2.0 2.0 0.0 h_hat=(b-a)/n
19 1 5.0 5.0 0.0 nodes=(a+(i+1/2)*h_hat for i in range(0,n))
20 1 774051.0 774051.0 100.0 suma_res = sum(((math.exp(-node**2) for node in nodes)))
21 1 2.0 2.0 0.0 return h_hat*suma_res
###Markdown
Observe in the following `CProfile` output for the `Rcf4` function that the lines with the largest share of the total time are: ``` nodes=(a+(i+1/2)*h_hat for i in range(0,n)) suma_res = sum(((math.exp(-node**2) for node in nodes)))```
###Code
import math
def Rcf4(a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n
Mid point is calculated via formula: x_{i-1}+(x_i-x_{i-1})/2 for i=1,...,n-1 to avoid rounding errors
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf4 (float)
"""
h_hat=(b-a)/n
nodes=(a+(i+1/2)*h_hat for i in range(0,n))
suma_res = sum(((math.exp(-node**2) for node in nodes)))
return h_hat*suma_res
%prun -s cumulative Rcf4(0,1,n)
###Output
###Markdown
RAM usage By analyzing the memory usage of our code we can answer questions such as:* Is it possible to use less RAM by rewriting my function so that it works more efficiently?* Can we use more RAM to make better use of the cache? 1) Using `%memit` It is the equivalent of `%timeit` in the sense that it performs a series of repetitions to obtain a stable result for the analyzed block of code.
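An aside, not in the original note: besides the `%memit` magic, `memory_profiler` also exposes a `memory_usage` function that can be called directly from Python. A minimal sketch (assuming the package installed at the beginning of the note):
```
from memory_profiler import memory_usage

# sample this process' memory every 0.1 s while Rcf(f,0,1,n) runs
mem_samples = memory_usage((Rcf, (f, 0, 1, n), {}), interval=0.1)
print("peak memory (MiB):", max(mem_samples))
```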
###Code
%load_ext memory_profiler
%memit?
###Output
_____no_output_____
###Markdown
First we measure how much RAM the notebook process is using:
###Code
%memit #how much RAM this process is consuming
###Output
peak memory: 119.73 MiB, increment: 0.00 MiB
###Markdown
And we can take measurements for each of the implementations of the rectangle rule:
###Code
%memit -c Rcf(f,0,1,n)
%memit -c Rcf2(f,0,1,n)
%memit -c Rcf3(f,0,1,10**5)
%memit -c Rcf4(0,1,10**5)
%memit -c Rcf5(0,1,10**5)
###Output
peak memory: 228.77 MiB, increment: 103.05 MiB
###Markdown
The use of `generators` helps us reduce the amount of RAM used by our process. 2) Using `memory_profiler` For line-by-line memory measurement we use `memory_profiler`. It runs slower than `line_profiler` (between $10$ and $100$ times slower!) and its execution speed improves if the `psutil` package is installed. From the command line it is run as follows:
###Code
%%file Rcf_memory_profiler.py
import math
@profile #this line is required to indicate that the following function
         #is to be profiled with memory_profiler
def Rcf(f,a,b,n): #Rcf: composite rectangle rule for f
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n and h_hat=(b-a)/n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf (float)
"""
h_hat=(b-a)/n
nodes=[a+(i+1/2)*h_hat for i in range(0,n)]
sum_res=0
for node in nodes:
sum_res=sum_res+f(node)
return h_hat*sum_res
if __name__=="__main__": #aรฑadimos este bloque para ejecuciรณn de la funciรณn Rcf
n=10**6
f=lambda x: math.exp(-x**2)
print("aproximaciรณn: {:0.6e}".format(Rcf(f,0,1,n)))
###Output
Writing Rcf_memory_profiler.py
###Markdown
In the following output we observe that the line that most increases the amount of RAM allocated to the process containing the execution of the `Rcf` function is the creation of the list of nodes `nodes=[a+(i+1/2)*h_hat for i in range(0,n)]`. **Careful:** the value of the `Increment` column for this line does not necessarily mean that the `nodes` list occupies $512 MB$'s in memory, only that the process grew by $512 MB$'s for the allocation of the list.**Note:** the output shows $MiB$, which are mebibytes. Although a mebibyte is not equal to a megabyte, in this comment it is taken as megabytes since the difference between these units is subtle.
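A quick check of the MiB/MB remark (not in the original note): a mebibyte is $2^{20}$ bytes while a megabyte is $10^6$ bytes, a difference of roughly $5\%$:
```
# 1 MiB vs 1 MB
print(2**20, 10**6, 2**20 / 10**6)  # 1048576 1000000 1.048576
```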
###Code
%%bash
python3 -m memory_profiler Rcf_memory_profiler.py
###Output
approximation: 7.468241e-01
Filename: Rcf_memory_profiler.py
Line # Mem usage Increment Line Contents
================================================
     2   37.750 MiB   37.750 MiB   @profile #this line is required to indicate that the following function
     3                                      #is to be profiled with memory_profiler
4 def Rcf(f,a,b,n): #Rcf: rectรกngulo compuesto para f
5 """
6 Compute numerical approximation using rectangle or mid-point method in
7 an interval.
8 Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n and h_hat=(b-a)/n
9 Args:
10 f (lambda expression): lambda expression of integrand
11 a (int): left point of interval
12 b (int): right point of interval
13 n (int): number of subintervals
14 Returns:
15 Rcf (float)
16 """
17 37.750 MiB 0.000 MiB h_hat=(b-a)/n
18 69.012 MiB 0.512 MiB nodes=[a+(i+1/2)*h_hat for i in range(0,n)]
19 69.012 MiB 0.000 MiB sum_res=0
20 69.012 MiB 0.000 MiB for node in nodes:
21 69.012 MiB 0.000 MiB sum_res=sum_res+f(node)
22 69.012 MiB 0.000 MiB return h_hat*sum_res
###Markdown
As already noted, generators save memory:
###Code
%%file Rcf3_memory_profiler.py
import math
@profile #this line is required to indicate that the following function
         #is to be profiled with memory_profiler
def Rcf3(f,a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf3 (float)
"""
h_hat=(b-a)/n
nodes=(a+(i+1/2)*h_hat for i in range(0,n))
suma_res = sum((f(node) for node in nodes))
return h_hat*suma_res
if __name__=="__main__": #aรฑadimos este bloque para ejecuciรณn de la funciรณn Rcf3
n=10**6
f=lambda x: math.exp(-x**2)
print("aproximaciรณn: {:0.6e}".format(Rcf3(f,0,1,n)))
###Output
Writing Rcf3_memory_profiler.py
###Markdown
In the following output, the process involving the execution of the `Rcf3` function does not increase its RAM usage, thanks to the use of generators:
###Code
%%bash
python3 -m memory_profiler Rcf3_memory_profiler.py
###Output
approximation: 7.468241e-01
Filename: Rcf3_memory_profiler.py
Line # Mem usage Increment Line Contents
================================================
     2   37.590 MiB   37.590 MiB   @profile #this line is required to indicate that the following function
     3                                      #is to be profiled with memory_profiler
4 def Rcf3(f,a,b,n):
5 """
6 Compute numerical approximation using rectangle or mid-point method in
7 an interval.
8 Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n
9 Args:
10 f (lambda expression): lambda expression of integrand
11 a (int): left point of interval
12 b (int): right point of interval
13 n (int): number of subintervals
14 Returns:
15 Rcf3 (float)
16 """
17 37.590 MiB 0.000 MiB h_hat=(b-a)/n
18 37.590 MiB 0.000 MiB nodes=(a+(i+1/2)*h_hat for i in range(0,n))
19 37.590 MiB 0.000 MiB suma_res = sum((f(node) for node in nodes))
20 37.590 MiB 0.000 MiB return h_hat*suma_res
###Markdown
3) Using heapy With `heapy` we can inspect the number and size of each object in Python's heap (see [link](https://docs.python.org/3/c-api/memory.html) and [link2](https://stackoverflow.com/questions/14546178/does-python-have-a-stack-heap-and-how-is-memory-managed) about memory management). It also helps to find **memory leaks**, which occur if we keep pointing to an object we should no longer be pointing to... see [link3](https://en.wikipedia.org/wiki/Memory_leak) to learn what memory leaks are.
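An aside, not in the original note: a common `heapy` pattern is to call `setrelheap()` first so that later calls to `heap()` only report objects allocated after that point (assumption: `guppy3` keeps this classic `heapy` method). A minimal sketch:
```
from guppy import hpy

hp = hpy()
hp.setrelheap()                          # take the current heap as the reference point
data = [i * 0.5 for i in range(10**5)]   # allocate something measurable
h = hp.heap()                            # shows only objects created after setrelheap()
print(h)
print("total size in bytes:", h.size)    # assumption: the result set exposes a .size attribute
```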
###Code
import math
from guppy import hpy
def Rcf(f,a,b,n): #Rcf: composite rectangle rule for f
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n and h_hat=(b-a)/n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf (float)
"""
hp=hpy()
h_hat=(b-a)/n
h=hp.heap()
print("beginning of Rcf")
print(h)
nodes=[a+(i+1/2)*h_hat for i in range(0,n)]
h=hp.heap()
print("After creating list")
print(h)
sum_res=0
for node in nodes:
sum_res=sum_res+f(node)
h=hp.heap()
print("After loop")
print(h)
return h_hat*sum_res
Rcf(f,0,1,n)
import math
from guppy import hpy
def Rcf3(f,a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf3 (float)
"""
hp=hpy()
h_hat=(b-a)/n
h=hp.heap()
print("beginning of Rcf3")
print(h)
nodes=(a+(i+1/2)*h_hat for i in range(0,n))
h=hp.heap()
print("After creating generator")
print(h)
suma_res = sum((f(node) for node in nodes))
h=hp.heap()
print("After loop")
print(h)
return h_hat*suma_res
Rcf3(f,0,1,n)
###Output
beginning of Rcf3
Partition of a set of 451930 objects. Total size = 56178992 bytes.
Index Count % Size % Cumulative % Kind (class / dict of class)
0 126664 28 17317847 31 17317847 31 str
1 123040 27 9506016 17 26823863 48 tuple
2 54025 12 4265717 8 31089580 55 bytes
3 27255 6 3942936 7 35032516 62 types.CodeType
4 25716 6 3497376 6 38529892 69 function
5 3155 1 3112744 6 41642636 74 type
6 6819 2 2830712 5 44473348 79 dict (no owner)
7 1244 0 1935072 3 46408420 83 dict of module
8 3155 1 1578376 3 47986796 85 dict of type
9 2286 1 846912 2 48833708 87 set
<1047 more rows. Type e.g. '_.more' to view.>
After creating generator
Partition of a set of 451952 objects. Total size = 56180784 bytes.
Index Count % Size % Cumulative % Kind (class / dict of class)
0 126664 28 17317847 31 17317847 31 str
1 123041 27 9506072 17 26823919 48 tuple
2 54025 12 4265717 8 31089636 55 bytes
3 27255 6 3942936 7 35032572 62 types.CodeType
4 25716 6 3497376 6 38529948 69 function
5 3155 1 3112744 6 41642692 74 type
6 6820 2 2830952 5 44473644 79 dict (no owner)
7 1244 0 1935072 3 46408716 83 dict of module
8 3155 1 1578376 3 47987092 85 dict of type
9 2286 1 846912 2 48834004 87 set
<1049 more rows. Type e.g. '_.more' to view.>
After loop
Partition of a set of 451944 objects. Total size = 56179648 bytes.
Index Count % Size % Cumulative % Kind (class / dict of class)
0 126664 28 17317847 31 17317847 31 str
1 123040 27 9506016 17 26823863 48 tuple
2 54025 12 4265717 8 31089580 55 bytes
3 27255 6 3942936 7 35032516 62 types.CodeType
4 25716 6 3497376 6 38529892 69 function
5 3155 1 3112744 6 41642636 74 type
6 6819 2 2830712 5 44473348 79 dict (no owner)
7 1244 0 1935072 3 46408420 83 dict of module
8 3155 1 1578376 3 47986796 85 dict of type
9 2286 1 846912 2 48833708 87 set
<1047 more rows. Type e.g. '_.more' to view.>
###Markdown
**Notas para contenedor de docker:** Comando de docker para ejecuciรณn de la nota de forma local:nota: cambiar `` por la ruta de directorio que se desea mapear a `/datos` dentro del contenedor de docker.```docker run --rm -v :/datos --name jupyterlab_numerical -p 8888:8888 -d palmoreck/jupyterlab_numerical:1.1.0```password para jupyterlab: `qwerty`Detener el contenedor de docker:```docker stop jupyterlab_numerical``` Documentaciรณn de la imagen de docker `palmoreck/jupyterlab_numerical:1.1.0` en [liga](https://github.com/palmoreck/dockerfiles/tree/master/jupyterlab/numerical). --- Esta nota utiliza mรฉtodos vistos en [1.5.Integracion_numerica](https://github.com/ITAM-DS/analisis-numerico-computo-cientifico/blob/master/temas/I.computo_cientifico/1.5.Integracion_numerica.ipynb) Instalamos las herramientas que nos ayudarรกn al perfilamiento:
###Code
%pip install -q --user line_profiler
%pip install -q --user memory_profiler
%pip install -q --user psutil
%pip install -q --user guppy3
###Output
[33mWARNING: You are using pip version 19.3.1; however, version 20.0.2 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.[0m
Note: you may need to restart the kernel to use updated packages.
###Markdown
La siguiente celda reiniciarรก el kernel de **IPython** para cargar los paquetes instalados en la celda anterior. Dar **Ok** en el mensaje que salga y continuar con el contenido del notebook.
###Code
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
import math
from scipy.integrate import quad
###Output
_____no_output_____
###Markdown
Perfilamiento en Python En esta nota revisamos algunas herramientas de Python para perfilamiento de cรณdigo: uso de cpu y memoria.Mediciรณn de tiempos con:* Mรณdulo [time](https://docs.python.org/3/library/time.htmltime.time) de Python.* [%time](https://ipython.readthedocs.io/en/stable/interactive/magics.htmlmagic-time) de comandos de magic <- esta herramienta es sรณlo para medir tiempos de un statement y sรณlo la coloco para referencia pero no se usarรก en la nota.* [/usr/bin/time](https://en.wikipedia.org/wiki/Time_(Unix)) de `Unix`.* [%timeit](https://ipython.readthedocs.io/en/stable/interactive/magics.htmlmagic-timeit) de comandos de magic.Perfilamiento:* De CPU con: [line_profiler](https://pypi.org/project/line-profiler/), [CProfile](https://docs.python.org/2/library/profile.html) que es `built-in` en la *standard-library* de Python.* De memoria con: [memory_profiler](https://pypi.org/project/memory-profiler/) y [heapy](https://pypi.org/project/guppy/). Mediciรณn de tiempos El primer acercamiento que usamos en la nota para perfilar nuestro cรณdigo es identificar quรฉ es lento, otras mediciones son la cantidad de RAM, el I/O en disco o network. 1) Uso de `time`
###Code
import time
###Output
_____no_output_____
###Markdown
Regla compuesta del rectรกngulo **Ejemplo de implementaciรณn de regla compuesta de rectรกngulo: usando math** Utilizar la regla compuesta del rectรกngulo para aproximar la integral $\int_0^1e^{-x^2}dx$ con $10^6$ subintervalos.
###Code
f=lambda x: math.exp(-x**2) #using math library
def Rcf(f,a,b,n): #Rcf: rectรกngulo compuesto para f
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf (float)
"""
h_hat=(b-a)/n
nodes=[a+(i+1/2)*h_hat for i in range(0,n)]
sum_res=0
for node in nodes:
sum_res=sum_res+f(node)
return h_hat*sum_res
n=10**6
start_time = time.time()
aprox=Rcf(f,0,1,n)
end_time = time.time()
secs = end_time-start_time
print("Rcf tomรณ",secs,"segundos" )
###Output
Rcf tomรณ 0.3661477565765381 segundos
###Markdown
**Obs:** recuรฉrdese que hay que evaluar que se estรฉ resolviendo correctamente el problema. En este caso el error relativo nos ayuda
###Code
def err_relativo(aprox, obj):
return math.fabs(aprox-obj)/math.fabs(obj) #obsรฉrvese el uso de la librerรญa math
obj, err = quad(f, 0, 1)
err_relativo(aprox,obj)
###Output
_____no_output_____
###Markdown
**Comentarios:*** Tรณmese en cuenta que al medir tiempos de ejecuciรณn, siempre hay variaciรณn en la mediciรณn. Tal variaciรณn es normal.* Considรฉrese que la mรกquina en la que se estรกn corriendo las pruebas puede estar realizando otras tareas mientras se ejecuta el cรณdigo, por ejemplo acceso a la red, al disco o a la RAM. Por ello, son factores que pueden causar variaciรณn en el tiempo de ejecuciรณn del programa.* Si se van a realizar reportes de tiempos, es importante indicar las caracterรญsticas de la mรกquina en la que se estรกn haciendo las pruebas, p.ej: Dell E6420 con un procesador Intel Core I7-2720QM (2.20 GHz, 6 MB cache, Quad Core) y 8 GB de RAM en un Ubuntu $13.10$. 2) Uso de `/usr/bin/time` de Unix Para la lรญnea de comando `/usr/bin/time` primero escribimos el siguiente archivo en la ruta donde se encuentra este notebook con la lรญnea de comando magic `%file`
###Code
%%file Rcf.py
import math
def Rcf(f,a,b,n): #Rcf: rectรกngulo compuesto para f
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf (float)
"""
h_hat=(b-a)/n
nodes=[a+(i+1/2)*h_hat for i in range(0,n)]
sum_res=0
for node in nodes:
sum_res=sum_res+f(node)
return h_hat*sum_res
if __name__=="__main__": #aรฑadimos este bloque para ejecuciรณn de la funciรณn Rcf
n=10**6
f=lambda x: math.exp(-x**2)
print("aproximaciรณn: {:0.6e}".format(Rcf(f,0,1,n)))
###Output
Writing Rcf.py
###Markdown
Ver [liga](https://stackoverflow.com/questions/419163/what-does-if-name-main-do) y [liga2](https://es.stackoverflow.com/questions/32165/qu%C3%A9-es-if-name-main) para saber quรฉ es lo que hace la lรญnea `if __name__ == "__main__":` Lo siguiente es necesario si no tienen instalado el comando `/usr/bin/time`:
###Code
%%bash
sudo apt-get install time
%%bash
/usr/bin/time -p python3 Rcf.py #la p es de portabilidad,
#ver: http://manpages.ubuntu.com/manpages/xenial/man1/time.1.html
#para mayor informaciรณn
###Output
aproximaciรณn: 7.468241e-01
###Markdown
**Comentarios:*** `real` que mide el wall clock o elapsed time.* `user` que mide la cantidad de tiempo de tu ejecuciรณn que la CPU gastรณ para funciones que no estรกn relacionadas con el kernel* del sistema.* `sys` que mide la cantidad de tiempo de tu ejecuciรณn que la CPU gastรณ en funciones a nivel de kernel del sistema.\*Ver [kernel operating system](https://en.wikipedia.org/wiki/Kernel_(operating_system)) para definiciรณn del kernel de una mรกquina.**Obs:** Una funciรณn relacionada con el kernel del sistema es el alojamiento de memoria al crear una variable. Otras son las instrucciones relacionadas con el I/O como leer de la memoria, disco o network.* La ventaja de `/usr/bin/time` es que no es especรญfico de Python.* Este comando incluye el tiempo que le toma al sistema iniciar el ejecutable de python (que puede ser significativo si se inician muchos procesos vs un sรณlo proceso). En el caso de tener short-running scripts donde el tiempo de inicio es significativo del tiempo total entonces `/usr/bin/time` puede ser una medida รบtil.**Nota:** Si se suma `user` con `sys` se tiene una idea de cuรกnto tiempo se gastรณ en la CPU y la diferencia entre este resultado y `real` da una idea de cuรกnto tiempo se gastรณ para I/O o tambiรฉn puede dar una idea de la cantidad de tiempo que se ocupรณ el sistema en correr otras tareas. * Se puede utilizar la flag `verbose` para obtener mรกs informaciรณn:
###Code
%%bash
/usr/bin/time --verbose python3 Rcf.py
###Output
aproximaciรณn: 7.468241e-01
###Markdown
y una explicaciรณn (breve) del output se puede encontrar [aquรญ](http://manpages.ubuntu.com/manpages/xenial/man1/time.1.html). Para el caso de `Major (requiring I/O)` nos interesa que sea $0$ pues indica que el sistema operativo tiene que cargar pรกginas de datos del disco pues tales datos ya no residen en RAM (por alguna razรณn). 3) Uso de `%timeit` El mรณdulo de `timeit` es otra forma de medir el tiempo de ejecuciรณn en la CPU.**Nota:** el mรณdulo de `timeit` desabilita temporalmente el garbage collector* de Python (esto es, no habrรก desalojamiento en memoria de objetos de Python que no se utilicen). Si el garbage collector es invocado en tus operaciones para un ejemplo del mundo real, esto puede ser una razรณn de posibles diferencias que obtengas en las mediciones de tiempo. *sugiero buscar quรฉ es el garbage collector en blogs, por ejemplo: [liga](https://rushter.com/blog/python-garbage-collector/) o [liga2](https://stackify.com/python-garbage-collection/) o [liga3](https://stackoverflow.com/questions/4484167/python-garbage-collector-documentation).
###Code
%timeit?
%timeit -n 5 -r 10 Rcf(f,0,1,n)
###Output
333 ms ยฑ 11.1 ms per loop (mean ยฑ std. dev. of 10 runs, 5 loops each)
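###Markdown
The same measurement can be reproduced without the magic command by using the `timeit` module directly. A minimal sketch, assuming the `Rcf`, `f` and `n` defined above (`timeit.repeat` is a standard-library function; `number` and `repeat` mirror the `-n` and `-r` flags):
###Code
import timeit
times = timeit.repeat(stmt="Rcf(f,0,1,n)", repeat=10, number=5, globals=globals())
#each entry of times is the total time of 5 executions; report the best average per call
print(min(times)/5, "seconds per call (best of 10 repetitions)")
###Output
_____no_output_____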
###Markdown
in this case the function `Rcf` is executed in a loop of size $5$, the times of those $5$ executions are averaged, this is repeated $10$ times, and the mean and standard deviation over the $10$ runs are reported (as the output itself indicates). $ms$ is millisecond, $\mu s$ is microsecond and $ns$ is nanosecond. **Comments:*** `timeit` is recommended for small sections of code. For larger sections, changing the value of $n$ (running the code n times in a loop) typically yields different measurements.* Run `timeit` several times to make sure you obtain similar times. If you observe large variation between different repetitions of `timeit`, perform more repetitions until the result is stable. Measuring CPU usage 1) Use of cProfile `cProfile` is a **built-in** profiling tool in the standard library. It is used with the `CPython` implementation of `Python` (see [link](https://stackoverflow.com/questions/17130975/python-vs-cpython) for an explanation of Python implementations) to measure the execution time of every function in the program. It can be run from the command line or with a magic command. The `-s` flag sorts the result by the cumulative time spent inside each function. The following `cProfile` output shows:* The total execution time, which includes the time of the block of code being measured plus the overhead of using `cProfile`. This is why the execution time is larger than in the previous time measurements.* The `ncalls` column which, as the name indicates, shows the number of times each function was called. In this case the `lambda` and `math.exp` functions are the ones called most often: $n=10^6$ times. The `tottime` column shows the time these functions took to execute (excluding calls to other functions).* The `percall` column is the quotient of `tottime` and `ncalls`.* The `cumtime` column contains the time spent in the function and in the functions it calls. For example `Rcf` calls `listcomp`, so it is natural for `Rcf` to appear higher in the sorted `cProfile` output. The same happens with `lambda` and `math.exp`, since the former calls the latter.* The second `percall` column is the quotient of the `cumtime` column and the number of primitive calls.* The last column gives the function's name and the line where it is located in the code. For example, line $1$ of the module is the call to `__main__` and line $2$ is the call to `Rcf`, so the call to `__main__` is practically negligible.
###Code
%%bash
python3 -m cProfile -s cumulative Rcf.py
###Output
aproximaciรณn: 7.468241e-01
2000068 function calls in 0.649 seconds
Ordered by: cumulative time
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.000 0.000 0.649 0.649 {built-in method builtins.exec}
1 0.021 0.021 0.649 0.649 Rcf.py:1(<module>)
1 0.155 0.155 0.627 0.627 Rcf.py:2(Rcf)
1000000 0.252 0.000 0.343 0.000 Rcf.py:23(<lambda>)
1 0.129 0.129 0.129 0.129 Rcf.py:16(<listcomp>)
1000000 0.090 0.000 0.090 0.000 {built-in method math.exp}
1 0.000 0.000 0.001 0.001 <frozen importlib._bootstrap>:966(_find_and_load)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec)
2 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:728(create_module)
1 0.000 0.000 0.000 0.000 {built-in method _imp.create_builtin}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:58(__init__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec)
1 0.000 0.000 0.000 0.000 {method 'format' of 'str' objects}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:151(__exit__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:103(release)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec)
1 0.000 0.000 0.000 0.000 {built-in method builtins.print}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader)
2 0.000 0.000 0.000 0.000 {built-in method _thread.allocate_lock}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:847(__exit__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:736(exec_module)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:78(acquire)
2 0.000 0.000 0.000 0.000 {built-in method _thread.get_ident}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:232(_requires_builtin_wrapper)
1 0.000 0.000 0.000 0.000 {built-in method builtins.any}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:176(cb)
4 0.000 0.000 0.000 0.000 {built-in method builtins.getattr}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:311(__enter__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:369(__init__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:222(_verbose_message)
2 0.000 0.000 0.000 0.000 {method 'get' of 'dict' objects}
2 0.000 0.000 0.000 0.000 {method 'rpartition' of 'str' objects}
1 0.000 0.000 0.000 0.000 {built-in method _imp.is_builtin}
4 0.000 0.000 0.000 0.000 {built-in method builtins.hasattr}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:307(__init__)
4 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:321(<genexpr>)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:416(parent)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:843(__enter__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:143(__init__)
3 0.000 0.000 0.000 0.000 {built-in method _imp.acquire_lock}
3 0.000 0.000 0.000 0.000 {built-in method _imp.release_lock}
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
1 0.000 0.000 0.000 0.000 {built-in method _imp.exec_builtin}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:424(has_location)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:753(is_package)
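###Markdown
`cProfile` can also be driven from inside the interpreter without leaving the notebook. A minimal sketch, assuming the `Rcf`, `f` and `n` already defined (`cProfile.run` is part of the standard library and accepts a `sort` argument analogous to the `-s` flag):
###Code
import cProfile
cProfile.run("Rcf(f,0,1,n)", sort="cumulative") #same kind of report as the command line version
###Output
_____no_output_____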
###Markdown
**Note:** Remember that the `cProfile` output with the `-s cumulative` flag is sorted by the time spent in the functions called within the analyzed block of code. It is not sorted by parent functions. To obtain output showing which functions call which others, the following can be used:
###Code
%%bash
python3 -m cProfile -o profile.stats Rcf.py
import pstats
p = pstats.Stats("profile.stats")
p.sort_stats("cumulative")
p.print_stats()
p.print_callers()
###Output
Ordered by: cumulative time
Function was called by...
ncalls tottime cumtime
{built-in method builtins.exec} <-
Rcf.py:1(<module>) <- 1 0.014 0.587 {built-in method builtins.exec}
Rcf.py:2(Rcf) <- 1 0.131 0.573 Rcf.py:1(<module>)
Rcf.py:23(<lambda>) <- 1000000 0.218 0.300 Rcf.py:2(Rcf)
Rcf.py:16(<listcomp>) <- 1 0.142 0.142 Rcf.py:2(Rcf)
{built-in method math.exp} <- 1000000 0.082 0.082 Rcf.py:23(<lambda>)
<frozen importlib._bootstrap>:966(_find_and_load) <- 1 0.000 0.001 Rcf.py:1(<module>)
<frozen importlib._bootstrap>:936(_find_and_load_unlocked) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load)
<frozen importlib._bootstrap>:651(_load_unlocked) <- 1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked)
<frozen importlib._bootstrap>:564(module_from_spec) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
<frozen importlib._bootstrap>:728(create_module) <- 1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec)
<frozen importlib._bootstrap>:211(_call_with_frames_removed) <- 1 0.000 0.000 <frozen importlib._bootstrap>:728(create_module)
1 0.000 0.000 <frozen importlib._bootstrap>:736(exec_module)
{built-in method _imp.create_builtin} <- 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed)
<frozen importlib._bootstrap>:147(__enter__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load)
<frozen importlib._bootstrap>:870(_find_spec) <- 1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked)
<frozen importlib._bootstrap>:157(_get_module_lock) <- 1 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__)
<frozen importlib._bootstrap>:707(find_spec) <- 1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec)
{method 'format' of 'str' objects} <- 1 0.000 0.000 Rcf.py:1(<module>)
<frozen importlib._bootstrap>:433(spec_from_loader) <- 1 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec)
<frozen importlib._bootstrap>:58(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock)
<frozen importlib._bootstrap>:504(_init_module_attrs) <- 1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec)
{built-in method builtins.print} <- 1 0.000 0.000 Rcf.py:1(<module>)
<frozen importlib._bootstrap>:318(__exit__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
<frozen importlib._bootstrap>:151(__exit__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load)
<frozen importlib._bootstrap>:78(acquire) <- 1 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__)
<frozen importlib._bootstrap>:103(release) <- 1 0.000 0.000 <frozen importlib._bootstrap>:151(__exit__)
<frozen importlib._bootstrap>:232(_requires_builtin_wrapper) <- 1 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader)
{built-in method builtins.hasattr} <- 2 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader)
1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec)
1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
{built-in method builtins.getattr} <- 4 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs)
{built-in method builtins.any} <- 1 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__)
<frozen importlib._bootstrap>:369(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader)
<frozen importlib._bootstrap>:416(parent) <- 1 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs)
<frozen importlib._bootstrap>:736(exec_module) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
<frozen importlib._bootstrap>:176(cb) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load)
{built-in method _imp.is_builtin} <- 1 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec)
<frozen importlib._bootstrap>:311(__enter__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
<frozen importlib._bootstrap>:143(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load)
<frozen importlib._bootstrap>:222(_verbose_message) <- 1 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__)
{method 'get' of 'dict' objects} <- 1 0.000 0.000 <frozen importlib._bootstrap>:176(cb)
1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load)
<frozen importlib._bootstrap>:307(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
<frozen importlib._bootstrap>:321(<genexpr>) <- 4 0.000 0.000 {built-in method builtins.any}
<frozen importlib._bootstrap>:847(__exit__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec)
{method 'rpartition' of 'str' objects} <- 1 0.000 0.000 <frozen importlib._bootstrap>:416(parent)
1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked)
{built-in method _thread.allocate_lock} <- 2 0.000 0.000 <frozen importlib._bootstrap>:58(__init__)
{built-in method _thread.get_ident} <- 1 0.000 0.000 <frozen importlib._bootstrap>:78(acquire)
1 0.000 0.000 <frozen importlib._bootstrap>:103(release)
{built-in method _imp.acquire_lock} <- 1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock)
1 0.000 0.000 <frozen importlib._bootstrap>:176(cb)
1 0.000 0.000 <frozen importlib._bootstrap>:843(__enter__)
{built-in method _imp.release_lock} <- 1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock)
1 0.000 0.000 <frozen importlib._bootstrap>:176(cb)
1 0.000 0.000 <frozen importlib._bootstrap>:847(__exit__)
{built-in method _imp.exec_builtin} <- 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed)
<frozen importlib._bootstrap>:843(__enter__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec)
{method 'disable' of '_lsprof.Profiler' objects} <-
<frozen importlib._bootstrap>:424(has_location) <- 1 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs)
<frozen importlib._bootstrap>:753(is_package) <- 1 0.000 0.000 <frozen importlib._bootstrap>:232(_requires_builtin_wrapper)
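###Markdown
When the full report is too long, `pstats` can also restrict it to the most expensive entries. A minimal sketch, assuming the `profile.stats` file generated above (`strip_dirs`, `sort_stats` and the integer argument of `print_stats` belong to the standard `pstats` API):
###Code
import pstats
p = pstats.Stats("profile.stats")
p.strip_dirs().sort_stats("cumulative").print_stats(10) #only the 10 entries with largest cumulative time
###Output
_____no_output_____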
###Markdown
and we can also obtain the information of which functions each function called
###Code
p.print_callees()
###Output
Ordered by: cumulative time
Function called...
ncalls tottime cumtime
{built-in method builtins.exec} -> 1 0.014 0.587 Rcf.py:1(<module>)
Rcf.py:1(<module>) -> 1 0.000 0.001 <frozen importlib._bootstrap>:966(_find_and_load)
1 0.131 0.573 Rcf.py:2(Rcf)
1 0.000 0.000 {built-in method builtins.print}
1 0.000 0.000 {method 'format' of 'str' objects}
Rcf.py:2(Rcf) -> 1 0.142 0.142 Rcf.py:16(<listcomp>)
1000000 0.218 0.300 Rcf.py:23(<lambda>)
Rcf.py:23(<lambda>) -> 1000000 0.082 0.082 {built-in method math.exp}
Rcf.py:16(<listcomp>) ->
{built-in method math.exp} ->
<frozen importlib._bootstrap>:966(_find_and_load) -> 1 0.000 0.000 <frozen importlib._bootstrap>:143(__init__)
1 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__)
1 0.000 0.000 <frozen importlib._bootstrap>:151(__exit__)
1 0.000 0.000 <frozen importlib._bootstrap>:176(cb)
1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked)
1 0.000 0.000 {method 'get' of 'dict' objects}
<frozen importlib._bootstrap>:936(_find_and_load_unlocked) -> 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec)
1 0.000 0.000 {method 'rpartition' of 'str' objects}
<frozen importlib._bootstrap>:651(_load_unlocked) -> 1 0.000 0.000 <frozen importlib._bootstrap>:307(__init__)
1 0.000 0.000 <frozen importlib._bootstrap>:311(__enter__)
1 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__)
1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec)
1 0.000 0.000 <frozen importlib._bootstrap>:736(exec_module)
1 0.000 0.000 {built-in method builtins.hasattr}
<frozen importlib._bootstrap>:564(module_from_spec) -> 1 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs)
1 0.000 0.000 <frozen importlib._bootstrap>:728(create_module)
1 0.000 0.000 {built-in method builtins.hasattr}
<frozen importlib._bootstrap>:728(create_module) -> 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed)
<frozen importlib._bootstrap>:211(_call_with_frames_removed) -> 1 0.000 0.000 {built-in method _imp.create_builtin}
1 0.000 0.000 {built-in method _imp.exec_builtin}
{built-in method _imp.create_builtin} ->
<frozen importlib._bootstrap>:147(__enter__) -> 1 0.000 0.000 <frozen importlib._bootstrap>:78(acquire)
1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock)
<frozen importlib._bootstrap>:870(_find_spec) -> 1 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec)
1 0.000 0.000 <frozen importlib._bootstrap>:843(__enter__)
1 0.000 0.000 <frozen importlib._bootstrap>:847(__exit__)
<frozen importlib._bootstrap>:157(_get_module_lock) -> 1 0.000 0.000 <frozen importlib._bootstrap>:58(__init__)
1 0.000 0.000 {built-in method _imp.acquire_lock}
1 0.000 0.000 {built-in method _imp.release_lock}
<frozen importlib._bootstrap>:707(find_spec) -> 1 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader)
1 0.000 0.000 {built-in method _imp.is_builtin}
{method 'format' of 'str' objects} ->
<frozen importlib._bootstrap>:433(spec_from_loader) -> 1 0.000 0.000 <frozen importlib._bootstrap>:232(_requires_builtin_wrapper)
1 0.000 0.000 <frozen importlib._bootstrap>:369(__init__)
2 0.000 0.000 {built-in method builtins.hasattr}
<frozen importlib._bootstrap>:58(__init__) -> 2 0.000 0.000 {built-in method _thread.allocate_lock}
<frozen importlib._bootstrap>:504(_init_module_attrs) -> 1 0.000 0.000 <frozen importlib._bootstrap>:416(parent)
1 0.000 0.000 <frozen importlib._bootstrap>:424(has_location)
4 0.000 0.000 {built-in method builtins.getattr}
{built-in method builtins.print} ->
<frozen importlib._bootstrap>:318(__exit__) -> 1 0.000 0.000 <frozen importlib._bootstrap>:222(_verbose_message)
1 0.000 0.000 {built-in method builtins.any}
<frozen importlib._bootstrap>:151(__exit__) -> 1 0.000 0.000 <frozen importlib._bootstrap>:103(release)
<frozen importlib._bootstrap>:78(acquire) -> 1 0.000 0.000 {built-in method _thread.get_ident}
<frozen importlib._bootstrap>:103(release) -> 1 0.000 0.000 {built-in method _thread.get_ident}
<frozen importlib._bootstrap>:232(_requires_builtin_wrapper) -> 1 0.000 0.000 <frozen importlib._bootstrap>:753(is_package)
{built-in method builtins.hasattr} ->
{built-in method builtins.getattr} ->
{built-in method builtins.any} -> 4 0.000 0.000 <frozen importlib._bootstrap>:321(<genexpr>)
<frozen importlib._bootstrap>:369(__init__) ->
<frozen importlib._bootstrap>:416(parent) -> 1 0.000 0.000 {method 'rpartition' of 'str' objects}
<frozen importlib._bootstrap>:736(exec_module) -> 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed)
<frozen importlib._bootstrap>:176(cb) -> 1 0.000 0.000 {built-in method _imp.acquire_lock}
1 0.000 0.000 {built-in method _imp.release_lock}
1 0.000 0.000 {method 'get' of 'dict' objects}
{built-in method _imp.is_builtin} ->
<frozen importlib._bootstrap>:311(__enter__) ->
<frozen importlib._bootstrap>:143(__init__) ->
<frozen importlib._bootstrap>:222(_verbose_message) ->
{method 'get' of 'dict' objects} ->
<frozen importlib._bootstrap>:307(__init__) ->
<frozen importlib._bootstrap>:321(<genexpr>) ->
<frozen importlib._bootstrap>:847(__exit__) -> 1 0.000 0.000 {built-in method _imp.release_lock}
{method 'rpartition' of 'str' objects} ->
{built-in method _thread.allocate_lock} ->
{built-in method _thread.get_ident} ->
{built-in method _imp.acquire_lock} ->
{built-in method _imp.release_lock} ->
{built-in method _imp.exec_builtin} ->
<frozen importlib._bootstrap>:843(__enter__) -> 1 0.000 0.000 {built-in method _imp.acquire_lock}
{method 'disable' of '_lsprof.Profiler' objects} ->
<frozen importlib._bootstrap>:424(has_location) ->
<frozen importlib._bootstrap>:753(is_package) ->
###Markdown
The magic command is `%prun`:
###Code
%prun -s cumulative Rcf(f,0,1,n)
###Output
###Markdown
2) Use of line_profiler `line_profiler` profiles individual functions line by line. The idea is to first profile the program with `cProfile` to identify the functions that spend the most execution time, and then profile those functions with `line_profiler`. **Comment:** a good practice is to save the different versions of your code as you modify it, so that you keep a record of your changes. It can be run from the command line or loaded into IPython with the `load_ext` magic command:
###Code
%load_ext line_profiler
%lprun?
###Output
_____no_output_____
###Markdown
In the following output:* The `%Time` column contains the percentage of time spent. In the case being profiled, the line `sum_res=sum_res+f(node)` is where the largest percentage of time is spent, followed by the `for` line and the line that uses a [list comprehension](https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions) to create the numerical integration nodes.
###Code
%lprun -f Rcf Rcf(f,0,1,n)
###Output
_____no_output_____
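###Markdown
Outside of IPython, `line_profiler` can also be used programmatically. A minimal sketch, assuming the `Rcf`, `f` and `n` defined above (`LineProfiler` and `print_stats` are part of the line_profiler package):
###Code
from line_profiler import LineProfiler
lp = LineProfiler()
Rcf_wrapped = lp(Rcf) #wrap the function so that each of its lines is timed
Rcf_wrapped(f,0,1,n)
lp.print_stats()
###Output
_____no_output_____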
###Markdown
Given the evidence generated with `line_profiler`, could we write a faster function? The first thing we can do is use a [generator](https://wiki.python.org/moin/Generators) instead of a list:
###Code
def Rcf2(f,a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf2 (float)
"""
h_hat=(b-a)/n
nodes=(a+(i+1/2)*h_hat for i in range(0,n))
sum_res=0
for node in nodes:
sum_res=sum_res+f(node)
return h_hat*sum_res
###Output
_____no_output_____
###Markdown
measure with `%timeit`:
###Code
%timeit -n 5 -r 10 Rcf2(f,0,1,n)
aprox=Rcf2(f,0,1,n)
###Output
_____no_output_____
###Markdown
check that this new implementation is correct:
###Code
err_relativo(aprox,obj)
###Output
_____no_output_____
###Markdown
profile it with `line_profiler`:
###Code
%lprun -f Rcf2 Rcf2(f,0,1,n)
###Output
_____no_output_____
###Markdown
and observe that the percentage of time spent on the line that used to create the list is now negligible. We can write an implementation that deals with the time spent on the `for` line:
###Code
def Rcf3(f,a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf3 (float)
"""
h_hat=(b-a)/n
nodes=(a+(i+1/2)*h_hat for i in range(0,n))
suma_res = sum((f(node) for node in nodes))
return h_hat*suma_res
###Output
_____no_output_____
###Markdown
measure with `%timeit`:
###Code
%timeit -n 5 -r 10 Rcf3(f,0,1,n)
###Output
363 ms ยฑ 4.99 ms per loop (mean ยฑ std. dev. of 10 runs, 5 loops each)
###Markdown
check that this new implementation is correct:
###Code
aprox=Rcf3(f,0,1,n)
err_relativo(aprox,obj)
###Output
_____no_output_____
###Markdown
perfilarla con `line_profiler`:
###Code
%lprun -f Rcf3 Rcf3(f,0,1,n)
###Output
_____no_output_____
###Markdown
and most of the time percentage is now concentrated in a single line. Recall that the `cProfile` result showed that the `lambda` function and `math.exp` are called $n=10^6$ times. An implementation of the rectangle rule with fewer function calls (and therefore less time) would be:
###Code
def Rcf4(a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
The integrand f(x)=exp(-x^2) is hard-coded inside the function via math.exp
Args:
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf4 (float)
"""
h_hat=(b-a)/n
nodes=(a+(i+1/2)*h_hat for i in range(0,n))
suma_res = sum(((math.exp(-node**2) for node in nodes)))
return h_hat*suma_res
%lprun -f Rcf4 Rcf4(0,1,n)
%timeit -n 5 -r 10 Rcf4(0,1,n)
aprox=Rcf4(0,1,n)
err_relativo(aprox,obj)
###Output
_____no_output_____
###Markdown
Although this implementation is the fastest so far, it is not as flexible, since it computes the rectangle rule for an integrand hard-coded inside the function itself. If we wanted to apply the rule to another function we would have to modify `Rcf4` directly, which is not flexible. Even though `Rcf4` is faster, we prefer `Rcf3` for its flexibility and lower resource usage (as will be seen with `memory_profiler` later).
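One way to keep `Rcf3`'s flexibility while trimming part of the interpreter overhead is to let `sum` consume a `map` object instead of a generator expression. This is a sketch that is not part of the original comparison (the name `Rcf3_map` is hypothetical); like the other variants, it should be checked with `err_relativo` and timed with `%timeit`:
###Code
def Rcf3_map(f,a,b,n):
    """
    Same mid-point rule as Rcf3, but f is applied to the nodes with map.
    """
    h_hat=(b-a)/n
    nodes=(a+(i+1/2)*h_hat for i in range(0,n))
    return h_hat*sum(map(f,nodes))
###Output
_____no_output_____
###Markdown
The next cell defines `Rcf5`, which takes this one step further: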
###Code
def Rcf5(a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
The integrand f(x)=exp(-x^2) is hard-coded inside the function via math.exp
Args:
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf5 (float)
"""
h_hat=(b-a)/n
f_nodes=(math.exp(-(a+(i+1/2)*h_hat)**2) for i in range(0,n))
suma_res = sum(f_nodes)
return h_hat*suma_res
%lprun -f Rcf5 Rcf5(0,1,n)
%timeit -n 5 -r 10 Rcf5(0,1,n)
aprox=Rcf5(0,1,n)
err_relativo(aprox,obj)
###Output
_____no_output_____
###Markdown
Observe that in `Rcf5` the nodes are built and transformed with `math.exp` in a single line. Although this implementation is the fastest so far, it is not recommended: like `Rcf4` it lacks flexibility, and it is not advisable to build data and transform it in one line. Combining operations in a single line produces code that is hard to read. It is better to separate these two tasks into separate steps so that, if one fails, we know which one failed and why. **Example of running line_profiler from the command line:**
###Code
%%file Rcf4.py
import math
@profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn
#desea perfilarse con line_profiler
def Rcf4(a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n
Mid point is calculated via formula: x_{i-1}+(x_i-x_{i-1})/2 for i=1,...,n-1 to avoid rounding errors
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf4 (float)
"""
h_hat=(b-a)/n
nodes=(a+(i+1/2)*h_hat for i in range(0,n))
suma_res = sum(((math.exp(-node**2) for node in nodes)))
return h_hat*suma_res
if __name__ == "__main__":
n=10**6
print("aproximaciรณn: {:0.6e}".format(Rcf4(0,1,n)))
%%bash
$HOME/.local/bin/kernprof -l -v Rcf4.py
###Output
aproximaciรณn: 7.468241e-01
Wrote profile results to Rcf4.py.lprof
Timer unit: 1e-06 s
Total time: 0.77406 s
File: Rcf4.py
Function: Rcf4 at line 2
Line # Hits Time Per Hit % Time Line Contents
==============================================================
2 @profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn
3 #desea perfilarse con line_profiler
4 def Rcf4(a,b,n):
5 """
6 Compute numerical approximation using rectangle or mid-point method in
7 an interval.
8 Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n
9 Mid point is calculated via formula: x_{i-1}+(x_i-x_{i-1})/2 for i=1,...,n-1 to avoid rounding errors
10 Args:
11 f (lambda expression): lambda expression of integrand
12 a (int): left point of interval
13 b (int): right point of interval
14 n (int): number of subintervals
15 Returns:
16 Rcf4 (float)
17 """
18 1 2.0 2.0 0.0 h_hat=(b-a)/n
19 1 5.0 5.0 0.0 nodes=(a+(i+1/2)*h_hat for i in range(0,n))
20 1 774051.0 774051.0 100.0 suma_res = sum(((math.exp(-node**2) for node in nodes)))
21 1 2.0 2.0 0.0 return h_hat*suma_res
###Markdown
Observe in the following `cProfile` output for the function `Rcf4` that the lines with the largest share of the total time are: ``` nodes=(a+(i+1/2)*h_hat for i in range(0,n)) suma_res = sum(((math.exp(-node**2) for node in nodes)))```
###Code
import math
def Rcf4(a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n
Mid point is calculated via formula: x_{i-1}+(x_i-x_{i-1})/2 for i=1,...,n-1 to avoid rounding errors
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf4 (float)
"""
h_hat=(b-a)/n
nodes=(a+(i+1/2)*h_hat for i in range(0,n))
suma_res = sum(((math.exp(-node**2) for node in nodes)))
return h_hat*suma_res
%prun -s cumulative Rcf4(0,1,n)
###Output
###Markdown
RAM usage Analyzing the memory usage of your code lets us answer questions such as:* Is it possible to use less RAM by rewriting my function so that it works more efficiently?* Can we use more RAM to take better advantage of the cache? 1) Use of `%memit` It is the equivalent of `%timeit` in the sense that it performs a series of repetitions to obtain a stable result for the analyzed block of code.
###Code
%load_ext memory_profiler
%memit?
###Output
_____no_output_____
###Markdown
First we measure how much RAM the notebook process is using:
###Code
%memit #how much RAM this process is consuming
###Output
peak memory: 119.73 MiB, increment: 0.00 MiB
###Markdown
And we can take measurements for each of the implementations of the rectangle rule:
###Code
%memit -c Rcf(f,0,1,n)
%memit -c Rcf2(f,0,1,n)
%memit -c Rcf3(f,0,1,10**5)
%memit -c Rcf4(0,1,10**5)
%memit -c Rcf5(0,1,10**5)
###Output
peak memory: 228.77 MiB, increment: 103.05 MiB
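###Markdown
The same measurement is available as a plain function, which is convenient inside scripts. A minimal sketch, assuming the `Rcf`, `f` and `n` defined above (`memory_usage` is part of memory_profiler and accepts a `(callable, args)` tuple):
###Code
from memory_profiler import memory_usage
mem_samples = memory_usage((Rcf, (f, 0, 1, n)), interval=0.1) #RAM samples in MiB while Rcf runs
print("peak:", max(mem_samples), "MiB")
###Output
_____no_output_____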
###Markdown
Using `generators` helps us reduce the amount of RAM used by our process. 2) Use of `memory_profiler` For line-by-line memory measurement we use `memory_profiler`. It runs slower than `line_profiler` (between $10$ and $100$ times slower!) and its execution speed improves if the `psutil` package is installed. From the command line it is run as follows:
###Code
%%file Rcf_memory_profiler.py
import math
@profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn
#desea perfilarse con memory_profiler
def Rcf(f,a,b,n): #Rcf: rectรกngulo compuesto para f
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n and h_hat=(b-a)/n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf (float)
"""
h_hat=(b-a)/n
nodes=[a+(i+1/2)*h_hat for i in range(0,n)]
sum_res=0
for node in nodes:
sum_res=sum_res+f(node)
return h_hat*sum_res
if __name__=="__main__": #aรฑadimos este bloque para ejecuciรณn de la funciรณn Rcf
n=10**6
f=lambda x: math.exp(-x**2)
print("aproximaciรณn: {:0.6e}".format(Rcf(f,0,1,n)))
###Output
Writing Rcf_memory_profiler.py
###Markdown
In the following output we see that the line that most increases the amount of RAM allocated to the process running `Rcf` is the creation of the list of nodes, `nodes=[a+(i+1/2)*h_hat for i in range(0,n)]`: the `Mem usage` column jumps from about $37.7$ MiB to $69.0$ MiB on that line. **Careful:** the value in the `Increment` column for that line does not necessarily tell us how much memory the `nodes` list itself occupies, only by how much the process grew while that line was being executed.**Note:** the output reports $MiB$, which are mebibytes. Although a mebibyte is not equal to a megabyte, in this comment they are treated as megabytes since the difference between these units is subtle.
###Code
%%bash
python3 -m memory_profiler Rcf_memory_profiler.py
###Output
aproximaciรณn: 7.468241e-01
Filename: Rcf_memory_profiler.py
Line # Mem usage Increment Line Contents
================================================
2 37.750 MiB 37.750 MiB @profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn
3 #desea perfilarse con memory_profiler
4 def Rcf(f,a,b,n): #Rcf: rectรกngulo compuesto para f
5 """
6 Compute numerical approximation using rectangle or mid-point method in
7 an interval.
8 Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n and h_hat=(b-a)/n
9 Args:
10 f (lambda expression): lambda expression of integrand
11 a (int): left point of interval
12 b (int): right point of interval
13 n (int): number of subintervals
14 Returns:
15 Rcf (float)
16 """
17 37.750 MiB 0.000 MiB h_hat=(b-a)/n
18 69.012 MiB 0.512 MiB nodes=[a+(i+1/2)*h_hat for i in range(0,n)]
19 69.012 MiB 0.000 MiB sum_res=0
20 69.012 MiB 0.000 MiB for node in nodes:
21 69.012 MiB 0.000 MiB sum_res=sum_res+f(node)
22 69.012 MiB 0.000 MiB return h_hat*sum_res
###Markdown
As already noted, generators save memory:
###Code
%%file Rcf3_memory_profiler.py
import math
@profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn
#desea perfilarse con memory_profiler
def Rcf3(f,a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf3 (float)
"""
h_hat=(b-a)/n
nodes=(a+(i+1/2)*h_hat for i in range(0,n))
suma_res = sum((f(node) for node in nodes))
return h_hat*suma_res
if __name__=="__main__": #aรฑadimos este bloque para ejecuciรณn de la funciรณn Rcf3
n=10**6
f=lambda x: math.exp(-x**2)
print("aproximaciรณn: {:0.6e}".format(Rcf3(f,0,1,n)))
###Output
Writing Rcf3_memory_profiler.py
###Markdown
In the following output, the process running the function `Rcf3` does not increase its RAM usage, thanks to the use of generators:
###Code
%%bash
python3 -m memory_profiler Rcf3_memory_profiler.py
###Output
aproximaciรณn: 7.468241e-01
Filename: Rcf3_memory_profiler.py
Line # Mem usage Increment Line Contents
================================================
2 37.590 MiB 37.590 MiB @profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn
3 #desea perfilarse con memory_profiler
4 def Rcf3(f,a,b,n):
5 """
6 Compute numerical approximation using rectangle or mid-point method in
7 an interval.
8 Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n
9 Args:
10 f (lambda expression): lambda expression of integrand
11 a (int): left point of interval
12 b (int): right point of interval
13 n (int): number of subintervals
14 Returns:
15 Rcf3 (float)
16 """
17 37.590 MiB 0.000 MiB h_hat=(b-a)/n
18 37.590 MiB 0.000 MiB nodes=(a+(i+1/2)*h_hat for i in range(0,n))
19 37.590 MiB 0.000 MiB suma_res = sum((f(node) for node in nodes))
20 37.590 MiB 0.000 MiB return h_hat*suma_res
###Markdown
3) Use of heapy With `heapy` we can inspect the number and size of every object in Python's heap (see [link](https://docs.python.org/3/c-api/memory.html) and [link2](https://stackoverflow.com/questions/14546178/does-python-have-a-stack-heap-and-how-is-memory-managed) for memory management). It also helps find **memory leaks**, which occur when we keep a reference to an object we should no longer be referencing; see [link3](https://en.wikipedia.org/wiki/Memory_leak) to learn what memory leaks are.
###Code
import math
from guppy import hpy
def Rcf(f,a,b,n): #Rcf: composite rectangle rule for f
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n and h_hat=(b-a)/n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf (float)
"""
hp=hpy()
h_hat=(b-a)/n
h=hp.heap()
print("beginning of Rcf")
print(h)
nodes=[a+(i+1/2)*h_hat for i in range(0,n)]
h=hp.heap()
print("After creating list")
print(h)
sum_res=0
for node in nodes:
sum_res=sum_res+f(node)
h=hp.heap()
print("After loop")
print(h)
return h_hat*sum_res
Rcf(f,0,1,n)
import math
from guppy import hpy
def Rcf3(f,a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf3 (float)
"""
hp=hpy()
h_hat=(b-a)/n
h=hp.heap()
print("beginning of Rcf3")
print(h)
nodes=(a+(i+1/2)*h_hat for i in range(0,n))
h=hp.heap()
print("After creating generator")
print(h)
suma_res = sum((f(node) for node in nodes))
h=hp.heap()
print("After loop")
print(h)
return h_hat*suma_res
Rcf3(f,0,1,n)
###Output
beginning of Rcf3
Partition of a set of 451930 objects. Total size = 56178992 bytes.
Index Count % Size % Cumulative % Kind (class / dict of class)
0 126664 28 17317847 31 17317847 31 str
1 123040 27 9506016 17 26823863 48 tuple
2 54025 12 4265717 8 31089580 55 bytes
3 27255 6 3942936 7 35032516 62 types.CodeType
4 25716 6 3497376 6 38529892 69 function
5 3155 1 3112744 6 41642636 74 type
6 6819 2 2830712 5 44473348 79 dict (no owner)
7 1244 0 1935072 3 46408420 83 dict of module
8 3155 1 1578376 3 47986796 85 dict of type
9 2286 1 846912 2 48833708 87 set
<1047 more rows. Type e.g. '_.more' to view.>
After creating generator
Partition of a set of 451952 objects. Total size = 56180784 bytes.
Index Count % Size % Cumulative % Kind (class / dict of class)
0 126664 28 17317847 31 17317847 31 str
1 123041 27 9506072 17 26823919 48 tuple
2 54025 12 4265717 8 31089636 55 bytes
3 27255 6 3942936 7 35032572 62 types.CodeType
4 25716 6 3497376 6 38529948 69 function
5 3155 1 3112744 6 41642692 74 type
6 6820 2 2830952 5 44473644 79 dict (no owner)
7 1244 0 1935072 3 46408716 83 dict of module
8 3155 1 1578376 3 47987092 85 dict of type
9 2286 1 846912 2 48834004 87 set
<1049 more rows. Type e.g. '_.more' to view.>
After loop
Partition of a set of 451944 objects. Total size = 56179648 bytes.
Index Count % Size % Cumulative % Kind (class / dict of class)
0 126664 28 17317847 31 17317847 31 str
1 123040 27 9506016 17 26823863 48 tuple
2 54025 12 4265717 8 31089580 55 bytes
3 27255 6 3942936 7 35032516 62 types.CodeType
4 25716 6 3497376 6 38529892 69 function
5 3155 1 3112744 6 41642636 74 type
6 6819 2 2830712 5 44473348 79 dict (no owner)
7 1244 0 1935072 3 46408420 83 dict of module
8 3155 1 1578376 3 47986796 85 dict of type
9 2286 1 846912 2 48833708 87 set
<1047 more rows. Type e.g. '_.more' to view.>
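###Markdown
To focus on what a specific block of code allocates, instead of the whole interpreter heap, `heapy` can take a relative reference point. A minimal sketch (`setrelheap` is part of the guppy3 API; the list built here is only an illustration):
###Code
from guppy import hpy
hp = hpy()
hp.setrelheap() #objects allocated before this call are excluded from the report
nodes = [i*0.5 for i in range(10**5)] #illustrative allocation
print(hp.heap()) #shows only the objects created after setrelheap
###Output
_____no_output_____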
###Markdown
This note uses methods covered in [1.5.Integracion_numerica](https://github.com/ITAM-DS/analisis-numerico-computo-cientifico/blob/master/temas/I.computo_cientifico/1.5.Integracion_numerica.ipynb) **Notes for the docker container:** Docker command to run the note locally: note: replace `` with the path of the directory you want to map to `/datos` inside the docker container.```docker run --rm -v :/datos --name jupyterlab_numerical -p 8888:8888 -p 8786:8786 -p 8787:8787 -d palmoreck/jupyterlab_numerical:1.1.0```Stop the docker container:```docker stop jupyterlab_numerical``` Documentation for the docker image `palmoreck/jupyterlab_numerical:1.1.0` is at [link](https://github.com/palmoreck/dockerfiles/tree/master/jupyterlab/numerical). --- We install the tools that will help us with profiling:
###Code
%pip install -q --user line_profiler
%pip install -q --user memory_profiler
%pip install -q --user psutil
%pip install -q --user guppy3
###Output
Note: you may need to restart the kernel to use updated packages.
###Markdown
The next cell will restart the **IPython** kernel to load the packages installed in the previous cell. Click **Ok** on the message that appears and continue with the notebook content.
###Code
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
import math
from scipy.integrate import quad
###Output
_____no_output_____
###Markdown
Profiling in Python In this note we review some Python tools for profiling code: CPU and memory usage. Time measurement with:* Python's [time](https://docs.python.org/3/library/time.html#time.time) module.* The [%time](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-time) magic command <- this tool only times a single statement and is included here for reference only; it will not be used in the note.* `Unix`'s [/usr/bin/time](https://en.wikipedia.org/wiki/Time_(Unix)).* The [%timeit](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-timeit) magic command. Profiling:* Of CPU with: [line_profiler](https://pypi.org/project/line-profiler/) and [CProfile](https://docs.python.org/2/library/profile.html), which is `built-in` in Python's *standard library*.* Of memory with: [memory_profiler](https://pypi.org/project/memory-profiler/) and [heapy](https://pypi.org/project/guppy/). Time measurement The first approach we use in this note to profile our code is identifying what is slow; other measurements are the amount of RAM and disk or network I/O. 1) Use of `time`
###Code
import time
###Output
_____no_output_____
###Markdown
Composite rectangle rule **Example of an implementation of the composite rectangle rule: using math** Use the composite rectangle rule to approximate the integral $\int_0^1e^{-x^2}dx$ with $10^6$ subintervals.
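The approximation implemented below is $\int_a^bf(x)dx \approx \hat{h}\sum_{i=0}^{n-1}f\left(a+\left(i+\frac{1}{2}\right)\hat{h}\right)$ with $\hat{h}=\frac{b-a}{n}$, that is, $f$ evaluated at the midpoint of each of the $n$ subintervals.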
###Code
f=lambda x: math.exp(-x**2) #using math library
def Rcf(f,a,b,n): #Rcf: composite rectangle rule for f
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n and h_hat=(b-a)/n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf (float)
"""
h_hat=(b-a)/n
nodes=[a+(i+1/2)*h_hat for i in range(0,n)]
sum_res=0
for node in nodes:
sum_res=sum_res+f(node)
return h_hat*sum_res
n=10**6
start_time = time.time()
aprox=Rcf(f,0,1,n)
end_time = time.time()
secs = end_time-start_time
print("Rcf tomรณ",secs,"segundos" )
###Output
Rcf tomรณ 0.5433993339538574 segundos
###Markdown
**Obs:** remember that we must check that the problem is being solved correctly. In this case the relative error helps us
###Code
def err_relativo(aprox, obj):
return math.fabs(aprox-obj)/math.fabs(obj) #note the use of the math library
obj, err = quad(f, 0, 1)
err_relativo(aprox,obj)
###Output
_____no_output_____
###Markdown
**Comments:*** Keep in mind that when measuring execution times there is always variation in the measurement. Such variation is normal.* Consider that the machine on which the tests are run may be performing other tasks while the code executes, for example network, disk or RAM access. These are factors that can cause variation in the program's execution time.* If timing reports are going to be made, it is important to state the characteristics of the machine on which the tests are run, e.g.: Dell E6420 with an Intel Core I7-2720QM processor (2.20 GHz, 6 MB cache, Quad Core) and 8 GB of RAM on Ubuntu $13.10$. 2) Use of Unix's `/usr/bin/time` For the `/usr/bin/time` command line we first write the following file in the directory where this notebook is located, using the `%%file` magic command
###Code
%%file Rcf.py
import math
def Rcf(f,a,b,n): #Rcf: composite rectangle rule for f
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n and h_hat=(b-a)/n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf (float)
"""
h_hat=(b-a)/n
nodes=[a+(i+1/2)*h_hat for i in range(0,n)]
sum_res=0
for node in nodes:
sum_res=sum_res+f(node)
return h_hat*sum_res
if __name__=="__main__": #we add this block so that Rcf runs when the file is executed as a script
n=10**6
f=lambda x: math.exp(-x**2)
print("aproximaciรณn: {:0.6e}".format(Rcf(f,0,1,n)))
###Output
Overwriting Rcf.py
###Markdown
See [link](https://stackoverflow.com/questions/419163/what-does-if-name-main-do) and [link2](https://es.stackoverflow.com/questions/32165/qu%C3%A9-es-if-name-main) to learn what the line `if __name__ == "__main__":` does. The following is only needed if the `/usr/bin/time` command is not installed:
###Code
%%bash
sudo apt-get install time
%%bash
/usr/bin/time -p python3 Rcf.py #the -p flag is for portability,
#see: http://manpages.ubuntu.com/manpages/xenial/man1/time.1.html
#for more information
###Output
aproximaciรณn: 7.468241e-01
###Markdown
**Comments:*** `real` measures the wall clock or elapsed time.* `user` measures the amount of CPU time your run spent in functions not related to the system kernel.* `sys` measures the amount of CPU time your run spent in kernel-level functions.**Obs:** One kernel-related task is allocating memory when a variable is created.* The advantage of `/usr/bin/time` is that it is not specific to Python.* This command includes the time the system takes to start the Python executable (which can be significant if many processes are launched instead of a single one). For short-running scripts where the startup time is a significant fraction of the total, `/usr/bin/time` can be a useful measurement.**Note:** Adding `user` and `sys` gives an idea of how much time was spent on the CPU, and the difference between that sum and `real` gives an idea of how much time was spent on I/O, or of how much time the system spent running other tasks. * The `verbose` flag can be used to obtain more information:
###Code
%%bash
/usr/bin/time --verbose python3 Rcf.py
###Output
aproximaciรณn: 7.468241e-01
###Markdown
and a (brief) explanation of the output can be found [here](http://manpages.ubuntu.com/manpages/xenial/man1/time.1.html). For `Major (requiring I/O)` we want the value to be $0$, since a nonzero value means the operating system had to load pages of data from disk because that data no longer resided in RAM (for some reason). 3) Use of `%timeit` The `timeit` module is another way to measure execution time on the CPU.**Note:** the `timeit` module temporarily disables Python's garbage collector* (that is, unused Python objects will not be deallocated). If the garbage collector would be invoked by your operations in a real-world example, this can be one reason for differences in your time measurements. *I suggest reading about the garbage collector in blogs, for example: [link](https://rushter.com/blog/python-garbage-collector/), [link2](https://stackify.com/python-garbage-collection/) or [link3](https://stackoverflow.com/questions/4484167/python-garbage-collector-documentation).
###Code
%timeit?
%timeit -n 5 -r 10 Rcf(f,0,1,n)
###Output
407 ms ยฑ 37.3 ms per loop (mean ยฑ std. dev. of 10 runs, 5 loops each)
###Markdown
in this case the function `Rcf` is executed in a loop of size $5$, the times of those $5$ executions are averaged, this is repeated $10$ times, and the mean and standard deviation over the $10$ runs are reported (as the output itself indicates). $ms$ is millisecond, $\mu s$ is microsecond and $ns$ is nanosecond. **Comments:*** `timeit` is recommended for small sections of code. For larger sections, changing the value of $n$ (running the code n times in a loop) typically yields different measurements.* Run `timeit` several times to make sure you obtain similar times. If you observe large variation between different repetitions of `timeit`, perform more repetitions until the result is stable. Measuring CPU usage 1) Use of cProfile `cProfile` is a **built-in** profiling tool in the standard library. It is used with the `CPython` implementation of `Python` (see [link](https://stackoverflow.com/questions/17130975/python-vs-cpython) for an explanation of Python implementations) to measure the execution time of every function in the program. It can be run from the command line or with a magic command. The `-s` flag sorts the result by the cumulative time spent inside each function. The following `cProfile` output shows:* The total execution time, which includes the time of the block of code being measured plus the overhead of using `cProfile`. This is why the execution time is larger than in the previous time measurements.* The `ncalls` column which, as the name indicates, shows the number of times each function was called. In this case the `lambda` and `math.exp` functions are the ones called most often: $n=10^6$ times. The `tottime` column shows the time these functions took to execute (excluding calls to other functions).* The `percall` column is the quotient of `tottime` and `ncalls`.* The `cumtime` column contains the time spent in the function and in the functions it calls. For example `Rcf` calls `listcomp`, so it is natural for `Rcf` to appear higher in the sorted `cProfile` output. The same happens with `lambda` and `math.exp`, since the former calls the latter.* The second `percall` column is the quotient of the `cumtime` column and the number of primitive calls.* The last column gives the function's name and the line where it is located in the code. For example, line $1$ of the module is the call to `__main__` and line $2$ is the call to `Rcf`, so the call to `__main__` is practically negligible.
###Code
%%bash
python3 -m cProfile -s cumulative Rcf.py
###Output
aproximaciรณn: 7.468241e-01
2000068 function calls in 0.638 seconds
Ordered by: cumulative time
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.000 0.000 0.638 0.638 {built-in method builtins.exec}
1 0.013 0.013 0.638 0.638 Rcf.py:1(<module>)
1 0.152 0.152 0.625 0.625 Rcf.py:2(Rcf)
1000000 0.250 0.000 0.342 0.000 Rcf.py:23(<lambda>)
1 0.130 0.130 0.130 0.130 Rcf.py:16(<listcomp>)
1000000 0.093 0.000 0.093 0.000 {built-in method math.exp}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:728(create_module)
2 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed)
1 0.000 0.000 0.000 0.000 {built-in method _imp.create_builtin}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock)
1 0.000 0.000 0.000 0.000 {method 'format' of 'str' objects}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec)
1 0.000 0.000 0.000 0.000 {built-in method builtins.print}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:58(__init__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:78(acquire)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:151(__exit__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:103(release)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:232(_requires_builtin_wrapper)
4 0.000 0.000 0.000 0.000 {built-in method builtins.getattr}
4 0.000 0.000 0.000 0.000 {built-in method builtins.hasattr}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:369(__init__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:736(exec_module)
2 0.000 0.000 0.000 0.000 {built-in method _thread.allocate_lock}
3 0.000 0.000 0.000 0.000 {built-in method _imp.acquire_lock}
1 0.000 0.000 0.000 0.000 {built-in method builtins.any}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:416(parent)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:176(cb)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:222(_verbose_message)
2 0.000 0.000 0.000 0.000 {method 'rpartition' of 'str' objects}
3 0.000 0.000 0.000 0.000 {built-in method _imp.release_lock}
1 0.000 0.000 0.000 0.000 {built-in method _imp.is_builtin}
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:307(__init__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:311(__enter__)
4 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:321(<genexpr>)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:843(__enter__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:847(__exit__)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:143(__init__)
2 0.000 0.000 0.000 0.000 {method 'get' of 'dict' objects}
2 0.000 0.000 0.000 0.000 {built-in method _thread.get_ident}
1 0.000 0.000 0.000 0.000 {built-in method _imp.exec_builtin}
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:424(has_location)
1 0.000 0.000 0.000 0.000 <frozen importlib._bootstrap>:753(is_package)
###Markdown
**Note:** Remember that the `cProfile` output with the `-s cumulative` flag is sorted by the time spent in the functions called within the analyzed block of code. It is not sorted by parent functions. To obtain output showing which functions call which others, the following can be used:
###Code
%%bash
python3 -m cProfile -o profile.stats Rcf.py
import pstats
p = pstats.Stats("profile.stats")
p.sort_stats("cumulative")
p.print_stats()
p.print_callers()
###Output
Ordered by: cumulative time
Function was called by...
ncalls tottime cumtime
{built-in method builtins.exec} <-
Rcf.py:1(<module>) <- 1 0.014 0.630 {built-in method builtins.exec}
Rcf.py:2(Rcf) <- 1 0.161 0.616 Rcf.py:1(<module>)
Rcf.py:23(<lambda>) <- 1000000 0.250 0.342 Rcf.py:2(Rcf)
Rcf.py:16(<listcomp>) <- 1 0.113 0.113 Rcf.py:2(Rcf)
{built-in method math.exp} <- 1000000 0.092 0.092 Rcf.py:23(<lambda>)
<frozen importlib._bootstrap>:966(_find_and_load) <- 1 0.000 0.000 Rcf.py:1(<module>)
<frozen importlib._bootstrap>:936(_find_and_load_unlocked) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load)
<frozen importlib._bootstrap>:651(_load_unlocked) <- 1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked)
<frozen importlib._bootstrap>:564(module_from_spec) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
<frozen importlib._bootstrap>:147(__enter__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load)
<frozen importlib._bootstrap>:728(create_module) <- 1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec)
<frozen importlib._bootstrap>:211(_call_with_frames_removed) <- 1 0.000 0.000 <frozen importlib._bootstrap>:728(create_module)
1 0.000 0.000 <frozen importlib._bootstrap>:736(exec_module)
{built-in method _imp.create_builtin} <- 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed)
<frozen importlib._bootstrap>:157(_get_module_lock) <- 1 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__)
<frozen importlib._bootstrap>:78(acquire) <- 1 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__)
{method 'format' of 'str' objects} <- 1 0.000 0.000 Rcf.py:1(<module>)
<frozen importlib._bootstrap>:870(_find_spec) <- 1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked)
<frozen importlib._bootstrap>:707(find_spec) <- 1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec)
<frozen importlib._bootstrap>:433(spec_from_loader) <- 1 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec)
{built-in method builtins.print} <- 1 0.000 0.000 Rcf.py:1(<module>)
<frozen importlib._bootstrap>:504(_init_module_attrs) <- 1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec)
<frozen importlib._bootstrap>:58(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock)
<frozen importlib._bootstrap>:318(__exit__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
<frozen importlib._bootstrap>:151(__exit__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load)
{built-in method builtins.hasattr} <- 2 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader)
1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec)
1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
<frozen importlib._bootstrap>:103(release) <- 1 0.000 0.000 <frozen importlib._bootstrap>:151(__exit__)
<frozen importlib._bootstrap>:232(_requires_builtin_wrapper) <- 1 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader)
{built-in method builtins.any} <- 1 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__)
{built-in method builtins.getattr} <- 4 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs)
<frozen importlib._bootstrap>:736(exec_module) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
{built-in method _imp.acquire_lock} <- 1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock)
1 0.000 0.000 <frozen importlib._bootstrap>:176(cb)
1 0.000 0.000 <frozen importlib._bootstrap>:843(__enter__)
<frozen importlib._bootstrap>:369(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader)
<frozen importlib._bootstrap>:143(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load)
<frozen importlib._bootstrap>:176(cb) <- 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load)
<frozen importlib._bootstrap>:222(_verbose_message) <- 1 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__)
{method 'get' of 'dict' objects} <- 1 0.000 0.000 <frozen importlib._bootstrap>:176(cb)
1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load)
{method 'rpartition' of 'str' objects} <- 1 0.000 0.000 <frozen importlib._bootstrap>:416(parent)
1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked)
{built-in method _thread.allocate_lock} <- 2 0.000 0.000 <frozen importlib._bootstrap>:58(__init__)
{built-in method _imp.release_lock} <- 1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock)
1 0.000 0.000 <frozen importlib._bootstrap>:176(cb)
1 0.000 0.000 <frozen importlib._bootstrap>:847(__exit__)
{built-in method _imp.is_builtin} <- 1 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec)
{built-in method _imp.exec_builtin} <- 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed)
<frozen importlib._bootstrap>:307(__init__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
<frozen importlib._bootstrap>:311(__enter__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
<frozen importlib._bootstrap>:416(parent) <- 1 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs)
<frozen importlib._bootstrap>:843(__enter__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec)
<frozen importlib._bootstrap>:847(__exit__) <- 1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec)
{built-in method _thread.get_ident} <- 1 0.000 0.000 <frozen importlib._bootstrap>:78(acquire)
1 0.000 0.000 <frozen importlib._bootstrap>:103(release)
{method 'disable' of '_lsprof.Profiler' objects} <-
<frozen importlib._bootstrap>:321(<genexpr>) <- 4 0.000 0.000 {built-in method builtins.any}
<frozen importlib._bootstrap>:424(has_location) <- 1 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs)
<frozen importlib._bootstrap>:753(is_package) <- 1 0.000 0.000 <frozen importlib._bootstrap>:232(_requires_builtin_wrapper)
###Markdown
y podemos tambiรฉn tener la informaciรณn de a quรฉ funciones llamรณ cada funciรณn
###Code
p.print_callees()
###Output
Ordered by: cumulative time
Function called...
ncalls tottime cumtime
{built-in method builtins.exec} -> 1 0.014 0.630 Rcf.py:1(<module>)
Rcf.py:1(<module>) -> 1 0.000 0.000 <frozen importlib._bootstrap>:966(_find_and_load)
1 0.161 0.616 Rcf.py:2(Rcf)
1 0.000 0.000 {built-in method builtins.print}
1 0.000 0.000 {method 'format' of 'str' objects}
Rcf.py:2(Rcf) -> 1 0.113 0.113 Rcf.py:16(<listcomp>)
1000000 0.250 0.342 Rcf.py:23(<lambda>)
Rcf.py:23(<lambda>) -> 1000000 0.092 0.092 {built-in method math.exp}
Rcf.py:16(<listcomp>) ->
{built-in method math.exp} ->
<frozen importlib._bootstrap>:966(_find_and_load) -> 1 0.000 0.000 <frozen importlib._bootstrap>:143(__init__)
1 0.000 0.000 <frozen importlib._bootstrap>:147(__enter__)
1 0.000 0.000 <frozen importlib._bootstrap>:151(__exit__)
1 0.000 0.000 <frozen importlib._bootstrap>:176(cb)
1 0.000 0.000 <frozen importlib._bootstrap>:936(_find_and_load_unlocked)
1 0.000 0.000 {method 'get' of 'dict' objects}
<frozen importlib._bootstrap>:936(_find_and_load_unlocked) -> 1 0.000 0.000 <frozen importlib._bootstrap>:651(_load_unlocked)
1 0.000 0.000 <frozen importlib._bootstrap>:870(_find_spec)
1 0.000 0.000 {method 'rpartition' of 'str' objects}
<frozen importlib._bootstrap>:651(_load_unlocked) -> 1 0.000 0.000 <frozen importlib._bootstrap>:307(__init__)
1 0.000 0.000 <frozen importlib._bootstrap>:311(__enter__)
1 0.000 0.000 <frozen importlib._bootstrap>:318(__exit__)
1 0.000 0.000 <frozen importlib._bootstrap>:564(module_from_spec)
1 0.000 0.000 <frozen importlib._bootstrap>:736(exec_module)
1 0.000 0.000 {built-in method builtins.hasattr}
<frozen importlib._bootstrap>:564(module_from_spec) -> 1 0.000 0.000 <frozen importlib._bootstrap>:504(_init_module_attrs)
1 0.000 0.000 <frozen importlib._bootstrap>:728(create_module)
1 0.000 0.000 {built-in method builtins.hasattr}
<frozen importlib._bootstrap>:147(__enter__) -> 1 0.000 0.000 <frozen importlib._bootstrap>:78(acquire)
1 0.000 0.000 <frozen importlib._bootstrap>:157(_get_module_lock)
<frozen importlib._bootstrap>:728(create_module) -> 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed)
<frozen importlib._bootstrap>:211(_call_with_frames_removed) -> 1 0.000 0.000 {built-in method _imp.create_builtin}
1 0.000 0.000 {built-in method _imp.exec_builtin}
{built-in method _imp.create_builtin} ->
<frozen importlib._bootstrap>:157(_get_module_lock) -> 1 0.000 0.000 <frozen importlib._bootstrap>:58(__init__)
1 0.000 0.000 {built-in method _imp.acquire_lock}
1 0.000 0.000 {built-in method _imp.release_lock}
<frozen importlib._bootstrap>:78(acquire) -> 1 0.000 0.000 {built-in method _thread.get_ident}
{method 'format' of 'str' objects} ->
<frozen importlib._bootstrap>:870(_find_spec) -> 1 0.000 0.000 <frozen importlib._bootstrap>:707(find_spec)
1 0.000 0.000 <frozen importlib._bootstrap>:843(__enter__)
1 0.000 0.000 <frozen importlib._bootstrap>:847(__exit__)
<frozen importlib._bootstrap>:707(find_spec) -> 1 0.000 0.000 <frozen importlib._bootstrap>:433(spec_from_loader)
1 0.000 0.000 {built-in method _imp.is_builtin}
<frozen importlib._bootstrap>:433(spec_from_loader) -> 1 0.000 0.000 <frozen importlib._bootstrap>:232(_requires_builtin_wrapper)
1 0.000 0.000 <frozen importlib._bootstrap>:369(__init__)
2 0.000 0.000 {built-in method builtins.hasattr}
{built-in method builtins.print} ->
<frozen importlib._bootstrap>:504(_init_module_attrs) -> 1 0.000 0.000 <frozen importlib._bootstrap>:416(parent)
1 0.000 0.000 <frozen importlib._bootstrap>:424(has_location)
4 0.000 0.000 {built-in method builtins.getattr}
<frozen importlib._bootstrap>:58(__init__) -> 2 0.000 0.000 {built-in method _thread.allocate_lock}
<frozen importlib._bootstrap>:318(__exit__) -> 1 0.000 0.000 <frozen importlib._bootstrap>:222(_verbose_message)
1 0.000 0.000 {built-in method builtins.any}
<frozen importlib._bootstrap>:151(__exit__) -> 1 0.000 0.000 <frozen importlib._bootstrap>:103(release)
{built-in method builtins.hasattr} ->
<frozen importlib._bootstrap>:103(release) -> 1 0.000 0.000 {built-in method _thread.get_ident}
<frozen importlib._bootstrap>:232(_requires_builtin_wrapper) -> 1 0.000 0.000 <frozen importlib._bootstrap>:753(is_package)
{built-in method builtins.any} -> 4 0.000 0.000 <frozen importlib._bootstrap>:321(<genexpr>)
{built-in method builtins.getattr} ->
<frozen importlib._bootstrap>:736(exec_module) -> 1 0.000 0.000 <frozen importlib._bootstrap>:211(_call_with_frames_removed)
{built-in method _imp.acquire_lock} ->
<frozen importlib._bootstrap>:369(__init__) ->
<frozen importlib._bootstrap>:143(__init__) ->
<frozen importlib._bootstrap>:176(cb) -> 1 0.000 0.000 {built-in method _imp.acquire_lock}
1 0.000 0.000 {built-in method _imp.release_lock}
1 0.000 0.000 {method 'get' of 'dict' objects}
<frozen importlib._bootstrap>:222(_verbose_message) ->
{method 'get' of 'dict' objects} ->
{method 'rpartition' of 'str' objects} ->
{built-in method _thread.allocate_lock} ->
{built-in method _imp.release_lock} ->
{built-in method _imp.is_builtin} ->
{built-in method _imp.exec_builtin} ->
<frozen importlib._bootstrap>:307(__init__) ->
<frozen importlib._bootstrap>:311(__enter__) ->
<frozen importlib._bootstrap>:416(parent) -> 1 0.000 0.000 {method 'rpartition' of 'str' objects}
<frozen importlib._bootstrap>:843(__enter__) -> 1 0.000 0.000 {built-in method _imp.acquire_lock}
<frozen importlib._bootstrap>:847(__exit__) -> 1 0.000 0.000 {built-in method _imp.release_lock}
{built-in method _thread.get_ident} ->
{method 'disable' of '_lsprof.Profiler' objects} ->
<frozen importlib._bootstrap>:321(<genexpr>) ->
<frozen importlib._bootstrap>:424(has_location) ->
<frozen importlib._bootstrap>:753(is_package) ->
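###Markdown
The `pstats` reports above are long. `print_stats`, `print_callers`, and `print_callees` accept restrictions (an entry count, a fraction, or a regular expression matched against the function name) to trim them; a short sketch reusing the `profile.stats` file generated above:
###Code
import pstats
p = pstats.Stats("profile.stats")
p.sort_stats("cumulative")
p.print_stats(10) # only the 10 most expensive entries
p.print_callers("Rcf") # only entries whose name matches the regex "Rcf"
###Output
_____no_output_____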
###Markdown
The magic command equivalent is `%prun`:
###Code
%prun -s cumulative Rcf(f,0,1,n)
###Output
###Markdown
2) Using line_profiler `line_profiler` profiles individual functions line by line. The idea is to profile the program first with `cProfile` to identify the functions that consume the most execution time, and then profile those with `line_profiler`. **Comment:** a good practice is to save the different versions of your code as you modify it, so you keep a record of your changes. It can be run from the command line or loaded in IPython with the `load_ext` magic command:
###Code
%load_ext line_profiler
%lprun?
###Output
_____no_output_____
###Markdown
In the following output: * The `%Time` column contains the percentage of time spent. In the profiled case, the line `sum_res=sum_res+f(node)` is the one where the largest share of time is spent, followed by the `for` line and the line that uses a [list comprehension](https://docs.python.org/3/tutorial/datastructures.html#list-comprehensions) to create the numerical integration nodes.
###Code
%lprun -f Rcf Rcf(f,0,1,n)
###Output
_____no_output_____
###Markdown
With the evidence generated by `line_profiler`, could we write a faster function? The first thing we can do is use a [generator](https://wiki.python.org/moin/Generators) instead of a list:
###Code
def Rcf2(f,a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
    Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf2 (float)
"""
h_hat=(b-a)/n
nodes=(a+(i+1/2)*h_hat for i in range(0,n))
sum_res=0
for node in nodes:
sum_res=sum_res+f(node)
return h_hat*sum_res
###Output
_____no_output_____
###Markdown
measure with `%timeit`:
###Code
%timeit -n 5 -r 10 Rcf2(f,0,1,n)
###Output
434 ms ยฑ 48.4 ms per loop (mean ยฑ std. dev. of 10 runs, 5 loops each)
###Markdown
revisar que estรก correcta esta nueva implementaciรณn:
###Code
aprox = Rcf2(f,0,1,n) # recompute the approximation with the new implementation
err_relativo(aprox,obj)
###Output
_____no_output_____
###Markdown
profile it with `line_profiler`:
###Code
%lprun -f Rcf2 Rcf2(f,0,1,n)
###Output
_____no_output_____
###Markdown
and observe that the percentage of time spent on the line that used to create the list is now negligible. We can write an implementation that addresses the time spent on the `for` line:
###Code
def Rcf3(f,a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
    Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n-1 and h_hat=(b-a)/n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf3 (float)
"""
h_hat=(b-a)/n
nodes=(a+(i+1/2)*h_hat for i in range(0,n))
suma_res = sum((f(node) for node in nodes))
return h_hat*suma_res
###Output
_____no_output_____
###Markdown
measure with `%timeit`:
###Code
%timeit -n 5 -r 10 Rcf3(f,0,1,n)
###Output
476 ms ยฑ 78.9 ms per loop (mean ยฑ std. dev. of 10 runs, 5 loops each)
###Markdown
check that this new implementation is correct:
###Code
aprox = Rcf3(f,0,1,n) # recompute the approximation with the new implementation
err_relativo(aprox,obj)
###Output
_____no_output_____
###Markdown
profile it with `line_profiler`:
###Code
%lprun -f Rcf3 Rcf3(f,0,1,n)
###Output
_____no_output_____
###Markdown
y se tiene la mayorรญa del porcentaje de tiempo ahora en una sola lรญnea. Recuรฉrdese que el resultado de `Cprofile` indicรณ que se llama a la funciรณn `lambda` y `math.exp` $n=10^6$ veces. Una implementaciรณn de la regla del rectรกngulo con menor nรบmero de llamadas a funciones (y por tanto menor tiempo) serรญa:
###Code
def Rcf4(a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n
Mid point is calculated via formula: x_{i-1}+(x_i-x_{i-1})/2 for i=1,...,n to avoid rounding errors
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf4 (float)
"""
h_hat=(b-a)/n
nodes=(a+(i+1/2)*h_hat for i in range(0,n))
suma_res = sum(((math.exp(-node**2) for node in nodes)))
return h_hat*suma_res
%lprun -f Rcf4 Rcf4(0,1,n)
%timeit -n 5 -r 10 Rcf4(0,1,n)
###Output
343 ms ยฑ 55.1 ms per loop (mean ยฑ std. dev. of 10 runs, 5 loops each)
###Markdown
Although this implementation is the fastest, it is not as flexible, since it computes the rectangle rule for a function defined inside the function itself. If we wanted to apply the rule to a different integrand we would have to modify `Rcf4` directly, which is not flexible (a vectorized variant that keeps the integrand as a parameter is sketched after the `%prun` output below). Even though `Rcf4` is faster, we prefer `Rcf3` for its flexibility and lower resource usage (which we will see with `memory_profiler` later). **Example of running line_profiler from the command line:**
###Code
%%file Rcf4.py
import math
@profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn
#desea perfilarse con line_profiler
def Rcf4(a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n
Mid point is calculated via formula: x_{i-1}+(x_i-x_{i-1})/2 for i=1,...,n to avoid rounding errors
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf4 (float)
"""
h_hat=(b-a)/n
nodes=(a+(i+1/2)*h_hat for i in range(0,n))
suma_res = sum(((math.exp(-node**2) for node in nodes)))
return h_hat*suma_res
if __name__ == "__main__":
n=10**6
print("aproximaciรณn: {:0.6e}".format(Rcf4(0,1,n)))
%%bash
$HOME/.local/bin/kernprof -l -v Rcf4.py
###Output
aproximaciรณn: 7.468241e-01
Wrote profile results to Rcf4.py.lprof
Timer unit: 1e-06 s
Total time: 0.812943 s
File: Rcf4.py
Function: Rcf4 at line 2
Line # Hits Time Per Hit % Time Line Contents
==============================================================
2 @profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn
3 #desea perfilarse con line_profiler
4 def Rcf4(a,b,n):
5 """
6 Compute numerical approximation using rectangle or mid-point method in
7 an interval.
8 Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n
9 Mid point is calculated via formula: x_{i-1}+(x_i-x_{i-1})/2 for i=1,...,n to avoid rounding errors
10 Args:
11 f (lambda expression): lambda expression of integrand
12 a (int): left point of interval
13 b (int): right point of interval
14 n (int): number of subintervals
15 Returns:
16 Rcf4 (float)
17 """
18 1 3.0 3.0 0.0 h_hat=(b-a)/n
19 1 5.0 5.0 0.0 nodes=(a+(i+1/2)*h_hat for i in range(0,n))
20 1 812933.0 812933.0 100.0 suma_res = sum(((math.exp(-node**2) for node in nodes)))
21 1 2.0 2.0 0.0 return h_hat*suma_res
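###Markdown
The `.lprof` file written by `kernprof` can be inspected again later without re-running the script (standard `line_profiler` usage, added here as a side note):
###Code
%%bash
python3 -m line_profiler Rcf4.py.lprof
###Output
_____no_output_____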
###Markdown
Observe in the following `cProfile` output for the `Rcf4` function that the lines with the largest share of the total time are ```nodes=(a+(i+1/2)*h_hat for i in range(0,n))``` and ```suma_res = sum(((math.exp(-node**2) for node in nodes)))```
###Code
import math
def Rcf4(a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n
Mid point is calculated via formula: x_{i-1}+(x_i-x_{i-1})/2 for i=1,...,n to avoid rounding errors
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf4 (float)
"""
h_hat=(b-a)/n
nodes=(a+(i+1/2)*h_hat for i in range(0,n))
suma_res = sum(((math.exp(-node**2) for node in nodes)))
return h_hat*suma_res
%prun -s cumulative Rcf4(0,1,n)
###Output
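###Markdown
As sketched here (an addition to the comparison; it assumes `numpy` is available and that `n` is still defined in the session), the per-node Python-level calls can be removed while keeping the integrand as a parameter, by evaluating a vectorized integrand on an array of mid-points:
###Code
import numpy as np
def Rcf_np(f_vec,a,b,n):
    """Mid-point rule with a vectorized integrand f_vec (illustrative variant)."""
    h_hat=(b-a)/n
    nodes=a+(np.arange(n)+0.5)*h_hat #mid-points of the n subintervals
    return h_hat*np.sum(f_vec(nodes))
#usage, assuming n is still defined in the session:
#print(Rcf_np(lambda x: np.exp(-x**2),0,1,n))
###Output
_____no_output_____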
###Markdown
RAM usage When analyzing the memory usage of your code we can answer questions such as: * Is it possible to use less RAM by rewriting my function so that it works more efficiently? * Can we use more RAM to take better advantage of the cache? 1) Using `%memit` It is equivalent to `%timeit` in the sense that it performs a series of repetitions to obtain a stable result for the analyzed block of code.
###Code
%load_ext memory_profiler
%memit?
###Output
_____no_output_____
###Markdown
First we measure how much RAM the notebook process is using:
###Code
%memit #how much RAM this process is consuming
###Output
peak memory: 112.13 MiB, increment: 0.00 MiB
###Markdown
And we can take measurements for each of the rectangle-rule implementations:
###Code
%memit -c Rcf(f,0,1,n)
%memit -c Rcf2(f,0,1,n)
%memit -c Rcf3(f,0,1,10**5)
%memit -c Rcf4(0,1,10**5)
###Output
peak memory: 201.55 MiB, increment: 88.66 MiB
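###Markdown
An alternative worth noting (a sketch that uses only the standard library and assumes `Rcf`, `Rcf3`, `f` and `n` are still defined from the cells above): `tracemalloc` reports the peak memory allocated during a single call, which gives a quick comparison of the list-based and generator-based implementations without extra packages.
###Code
import tracemalloc
for impl in (Rcf,Rcf3):
    tracemalloc.start()
    impl(f,0,1,n)
    current,peak=tracemalloc.get_traced_memory()
    tracemalloc.stop()
    print("{}: peak {:.1f} MiB".format(impl.__name__,peak/2**20))
###Output
_____no_output_____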
###Markdown
Using generators helps us reduce the amount of RAM used by our process. 2) Using `memory_profiler` For line-by-line memory measurement we use `memory_profiler`. It runs slower than `line_profiler` (between $10$ and $100$ times slower!) and its execution speed improves if the `psutil` package is installed. From the command line it is run as follows:
###Code
%%file Rcf_memory_profiler.py
import math
@profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn
#desea perfilarse con memory_profiler
def Rcf(f,a,b,n): #Rcf: rectรกngulo compuesto para f
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n and h_hat=(b-a)/n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf (float)
"""
h_hat=(b-a)/n
nodes=[a+(i+1/2)*h_hat for i in range(0,n)]
sum_res=0
for node in nodes:
sum_res=sum_res+f(node)
return h_hat*sum_res
if __name__=="__main__": #aรฑadimos este bloque para ejecuciรณn de la funciรณn Rcf
n=10**6
f=lambda x: math.exp(-x**2)
print("aproximaciรณn: {:0.6e}".format(Rcf(f,0,1,n)))
###Output
Writing Rcf_memory_profiler.py
###Markdown
In the following output we see that the line that most increases the amount of RAM allocated to the process running the `Rcf` function is the creation of the list of nodes, `nodes=[a+(i+1/2)*h_hat for i in range(0,n)]`. **Careful:** the value in the `Increment` column for this line does not necessarily mean that the `nodes` list itself occupies that much memory, only that the process grew by that amount while the list was being allocated. **Note:** the output reports $MiB$, which are mebibytes. Although a mebibyte is not equal to a megabyte, in this comment they are treated as megabytes since the difference between the two units is subtle.
###Code
%%bash
python3 -m memory_profiler Rcf_memory_profiler.py
###Output
aproximaciรณn: 7.468241e-01
Filename: Rcf_memory_profiler.py
Line # Mem usage Increment Line Contents
================================================
2 37.750 MiB 37.750 MiB @profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn
3 #desea perfilarse con memory_profiler
4 def Rcf(f,a,b,n): #Rcf: rectรกngulo compuesto para f
5 """
6 Compute numerical approximation using rectangle or mid-point method in
7 an interval.
8 Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n and h_hat=(b-a)/n
9 Args:
10 f (lambda expression): lambda expression of integrand
11 a (int): left point of interval
12 b (int): right point of interval
13 n (int): number of subintervals
14 Returns:
15 Rcf (float)
16 """
17 37.750 MiB 0.000 MiB h_hat=(b-a)/n
18 69.012 MiB 0.512 MiB nodes=[a+(i+1/2)*h_hat for i in range(0,n)]
19 69.012 MiB 0.000 MiB sum_res=0
20 69.012 MiB 0.000 MiB for node in nodes:
21 69.012 MiB 0.000 MiB sum_res=sum_res+f(node)
22 69.012 MiB 0.000 MiB return h_hat*sum_res
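###Markdown
As a complementary check (a sketch added here, assuming `n` is still defined), `sys.getsizeof` separates the size of the list object itself from the process growth reported above, which also includes the float objects the list references:
###Code
import sys
h_hat=(1-0)/n
nodes_list=[0+(i+1/2)*h_hat for i in range(0,n)]
list_only=sys.getsizeof(nodes_list) #the list object itself (header plus pointers)
with_floats=list_only+sum(sys.getsizeof(x) for x in nodes_list) #plus the float objects it references
print("list object: {:.1f} MiB, list + floats: {:.1f} MiB".format(list_only/2**20,with_floats/2**20))
###Output
_____no_output_____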
###Markdown
Como ya se habรญa notado, los generators ahorran memoria:
###Code
%%file Rcf3_memory_profiler.py
import math
@profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn
#desea perfilarse con memory_profiler
def Rcf3(f,a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf3 (float)
"""
h_hat=(b-a)/n
nodes=(a+(i+1/2)*h_hat for i in range(0,n))
suma_res = sum((f(node) for node in nodes))
return h_hat*suma_res
if __name__=="__main__": #aรฑadimos este bloque para ejecuciรณn de la funciรณn Rcf3
n=10**6
f=lambda x: math.exp(-x**2)
print("aproximaciรณn: {:0.6e}".format(Rcf3(f,0,1,n)))
###Output
Writing Rcf3_memory_profiler.py
###Markdown
In the following output, the process running the `Rcf3` function does not increase its RAM usage, thanks to the use of generators:
###Code
%%bash
python3 -m memory_profiler Rcf3_memory_profiler.py
###Output
aproximaciรณn: 7.468241e-01
Filename: Rcf3_memory_profiler.py
Line # Mem usage Increment Line Contents
================================================
2 37.590 MiB 37.590 MiB @profile #esta lรญnea es necesaria para indicar que la siguiente funciรณn
3 #desea perfilarse con memory_profiler
4 def Rcf3(f,a,b,n):
5 """
6 Compute numerical approximation using rectangle or mid-point method in
7 an interval.
8 Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n
9 Args:
10 f (lambda expression): lambda expression of integrand
11 a (int): left point of interval
12 b (int): right point of interval
13 n (int): number of subintervals
14 Returns:
15 Rcf3 (float)
16 """
17 37.590 MiB 0.000 MiB h_hat=(b-a)/n
18 37.590 MiB 0.000 MiB nodes=(a+(i+1/2)*h_hat for i in range(0,n))
19 37.590 MiB 0.000 MiB suma_res = sum((f(node) for node in nodes))
20 37.590 MiB 0.000 MiB return h_hat*suma_res
###Markdown
3) Using heapy With `heapy` we can inspect the number and size of every object in the Python heap (see [link](https://docs.python.org/3/c-api/memory.html) and [link2](https://stackoverflow.com/questions/14546178/does-python-have-a-stack-heap-and-how-is-memory-managed) for memory management). It also helps to find **memory leaks**, which occur when we keep a reference to an object we should no longer be referencing... see [link3](https://en.wikipedia.org/wiki/Memory_leak) for what memory leaks are.
###Code
import math
from guppy import hpy
def Rcf(f,a,b,n): #Rcf: rectรกngulo compuesto para f
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(i+1/2)h_hat for i=0,1,...,n and h_hat=(b-a)/n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf (float)
"""
hp=hpy()
h_hat=(b-a)/n
h=hp.heap()
print("beginning of Rcf")
print(h)
nodes=[a+(i+1/2)*h_hat for i in range(0,n)]
h=hp.heap()
print("After creating list")
print(h)
sum_res=0
for node in nodes:
sum_res=sum_res+f(node)
h=hp.heap()
print("After loop")
print(h)
return h_hat*sum_res
Rcf(f,0,1,n)
import math
from guppy import hpy
def Rcf3(f,a,b,n):
"""
Compute numerical approximation using rectangle or mid-point method in
an interval.
Nodes are generated via formula: x_i = a+(b-a)/n*i for i=0,1,...,n
Args:
f (lambda expression): lambda expression of integrand
a (int): left point of interval
b (int): right point of interval
n (int): number of subintervals
Returns:
Rcf3 (float)
"""
hp=hpy()
h_hat=(b-a)/n
h=hp.heap()
print("beginning of Rcf3")
print(h)
nodes=(a+(i+1/2)*h_hat for i in range(0,n))
h=hp.heap()
print("After creating generator")
print(h)
suma_res = sum((f(node) for node in nodes))
h=hp.heap()
print("After loop")
print(h)
return h_hat*suma_res
Rcf3(f,0,1,n)
###Output
beginning of Rcf3
Partition of a set of 451930 objects. Total size = 56178992 bytes.
Index Count % Size % Cumulative % Kind (class / dict of class)
0 126664 28 17317847 31 17317847 31 str
1 123040 27 9506016 17 26823863 48 tuple
2 54025 12 4265717 8 31089580 55 bytes
3 27255 6 3942936 7 35032516 62 types.CodeType
4 25716 6 3497376 6 38529892 69 function
5 3155 1 3112744 6 41642636 74 type
6 6819 2 2830712 5 44473348 79 dict (no owner)
7 1244 0 1935072 3 46408420 83 dict of module
8 3155 1 1578376 3 47986796 85 dict of type
9 2286 1 846912 2 48833708 87 set
<1047 more rows. Type e.g. '_.more' to view.>
After creating generator
Partition of a set of 451952 objects. Total size = 56180784 bytes.
Index Count % Size % Cumulative % Kind (class / dict of class)
0 126664 28 17317847 31 17317847 31 str
1 123041 27 9506072 17 26823919 48 tuple
2 54025 12 4265717 8 31089636 55 bytes
3 27255 6 3942936 7 35032572 62 types.CodeType
4 25716 6 3497376 6 38529948 69 function
5 3155 1 3112744 6 41642692 74 type
6 6820 2 2830952 5 44473644 79 dict (no owner)
7 1244 0 1935072 3 46408716 83 dict of module
8 3155 1 1578376 3 47987092 85 dict of type
9 2286 1 846912 2 48834004 87 set
<1049 more rows. Type e.g. '_.more' to view.>
After loop
Partition of a set of 451944 objects. Total size = 56179648 bytes.
Index Count % Size % Cumulative % Kind (class / dict of class)
0 126664 28 17317847 31 17317847 31 str
1 123040 27 9506016 17 26823863 48 tuple
2 54025 12 4265717 8 31089580 55 bytes
3 27255 6 3942936 7 35032516 62 types.CodeType
4 25716 6 3497376 6 38529892 69 function
5 3155 1 3112744 6 41642636 74 type
6 6819 2 2830712 5 44473348 79 dict (no owner)
7 1244 0 1935072 3 46408420 83 dict of module
8 3155 1 1578376 3 47986796 85 dict of type
9 2286 1 846912 2 48833708 87 set
<1047 more rows. Type e.g. '_.more' to view.>
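###Markdown
A possible refinement (a sketch; it assumes the `guppy` package that provides `hpy` also exposes `setrelheap`, as recent versions do): calling `hp.setrelheap()` before the allocation makes later `hp.heap()` calls report only the objects created after that point, so the list of nodes stands out from the interpreter's own objects.
###Code
from guppy import hpy
hp=hpy()
hp.setrelheap() #report only objects allocated after this point
h_hat=(1-0)/n
nodes=[0+(i+1/2)*h_hat for i in range(0,n)]
print(hp.heap()) #should now be dominated by the new list and its floats
###Output
_____no_output_____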
|
Modulo2/2. Funciones Python.ipynb | ###Markdown
Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal.
###Code
def mi_funcion():
# aquรญ mi codigo
pass # uso del pass es opcional
###Output
_____no_output_____
###Markdown
Una funciรณn, no es ejecutada hasta tanto no sea invocada. Para invocar una funciรณn, simplemente se la llama por su nombre:
###Code
# defino mi funciรณn
def hola():
print("Hola Mundo")
# llamo mi funciรณn
hola()
###Output
Hola Mundo
###Markdown
Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable:
###Code
# Funciรณn retorna la palabra "Hola Mundo"
def funcion():
return "Hola Mundo"
# Almaceno el valor devuelto en una variable
frase = funcion()
print(frase)
###Output
Hola Mundo
###Markdown
Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno.
###Code
def mi_funcion(nombre, apellido):
# algoritmo
pass
###Output
_____no_output_____
###Markdown
Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder:
###Code
# Ejemplo funciรณn con parรกmetros
def mi_funcion(nombre, apellido):
nombre_completo = nombre, apellido
print(nombre_completo)
mi_funcion('gonzalo','delgado')
# Ejemplo 2
def suma(a, b): # valores que se reciben
return a + b
a = 5
b = 6
resultado = suma(a, b) # valores que se envรญan
print(resultado)
###Output
11
###Markdown
Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn
###Code
# Ejemplo3
def resta(a, b):
return a - b
# argumento 30 => posiciรณn 0 => parรกmetro a
# argumento 10 => posiciรณn 1 => parรกmetro b
resta(30, 10)
###Output
_____no_output_____
###Markdown
Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente:
###Code
resta(b=30, a=10)
###Output
_____no_output_____
###Markdown
Valores por Defecto Es posible colocar valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando
###Code
def bar(x=2):
x = x + 90
return x
# my_var = 3
print(bar())
# pasando un valor a mi funcion
print(bar(6))
###Output
96
###Markdown
Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores
###Code
def indeterminados_posicion(*args):
for arg in args:
print(arg)
indeterminados_posicion(5,"Hola",[1,2,3,4,5])
###Output
5
Hola
[1, 2, 3, 4, 5]
###Markdown
Cuando se tiene los valores en lista
###Code
# valores a ser sumados se encuentran en una lista
def sumar(a,b):
return a + b
# lista con valores a ser sumados
numeros_sumar = [23,11]
print(sumar(*numeros_sumar))
###Output
34
###Markdown
kwargs Cuando no se sabe la cantidad de valores
###Code
def indeterminados_nombre(**kwargs):
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5])
###Output
n => 5
c => Hola
l => [1, 2, 3, 4, 5]
###Markdown
Valores contenidos en diccionario
###Code
def calcular(importe, descuento):
return importe - (importe * descuento / 100)
datos = {
"descuento": 10,
"importe": 1500
}
print(calcular(**datos))
###Output
1350.0
###Markdown
Combinando ambos conceptos
###Code
def super_funcion(*args,**kwargs):
total = 0
for arg in args:
total += arg
print("sumatorio => ", total)
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
super_funcion(10, 50, -1, 1.56, 10, 20, 300, nombre="Hector", edad=27)
datos_persona ={
'nombre':'Gonzalo',
'edad': 26
}
"Hola {nombre}, tu edad es {edad}".format(**datos_persona)
###Output
_____no_output_____
###Markdown
Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia (Mutables): Listas, diccionarios, conjuntos... Paso por valor
###Code
# Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato
def bar(x):
x = x + 90
x = 3
bar(x)
print(x)
###Output
3
###Markdown
###Code
# Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos
def bar(x):
return x + 90
my_var = 3
my_var = bar(my_var)
print(my_var)
###Output
93
###Markdown
Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera:
###Code
# Valor puede
def foo(x):
x[0] = x[0] * 99
x.append(99)
# lista original
my_list = [1, 2, 3]
foo(my_list)
my_list
###Output
_____no_output_____
###Markdown
###Code
# asi se genere una copia simple, esto no soluciona el problema
my_list2 = my_list
foo(my_list2)
my_list2
my_list
my_list.copy()
# se puede solucionar realizando una copia al objeto
foo(my_list.copy())
my_list
my_list
###Output
_____no_output_____
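###Markdown
A nuance worth illustrating (a short sketch added here, with a hypothetical nested list): `list.copy()` makes a shallow copy, so nested sublists are still shared between the original and the copy; for nested structures `copy.deepcopy` is needed.
###Code
import copy
def set_first(x):
    x[0][0] = 99 # mutate the inner list
nested = [[1, 2], [3, 4]]
shallow = nested.copy() # outer list copied, inner lists still shared
set_first(shallow)
print(nested) # the inner list changed: [[99, 2], [3, 4]]
deep = copy.deepcopy(nested)
set_first(deep)
print(nested) # unchanged by the call on the deep copy
###Output
_____no_output_____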
###Markdown
รmbito de variables en funciones----------------------------------- Ejemplo
###Code
# valor de variable global 'x' se mantiene
x = 7
def foo():
x = 42
print(x)
# llamo a la funcion
foo()
print(x)
# Global indica que se va a trabar con variable global por lo que cuando redefinimos a la variable,
# se cambia el valor global de esta
x = 7
def foo():
global x
x = 42
print(x)
# llamo a la funcion
foo()
print(x)
###Output
42
42
###Markdown
Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo.
###Code
def jugar(intento=1):
respuesta = input("ยฟDe quรฉ color es una naranja? ")
if respuesta.lower() != "naranja":
if intento < 3:
print("\nFallaste! Intรฉntalo de nuevo")
intento += 1
jugar(intento) # Llamada recursiva
else:
print("\nPerdiste!")
else:
print("\nGanaste!")
jugar()
###Output
ยฟDe quรฉ color es una naranja? fdfsd
###Markdown
Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar.
###Code
def par_o_impar():
numero = int(input("Escriba un nรบmero entero: "))
if 0 > numero:
print(f"Le he pedido un nรบmero entero mayor 0")
else:
if numero % 2 == 0:
print(f"El nรบmero {numero} es par.")
else:
print(f"El nรบmero {numero} es impar.")
par_o_impar()
def par_impar(number):
if (number % 2 == 0):
print("El numero es par")
else:
print("El numero es impar")
while True:
try:
number = int(input("Ingrese un numero"))
break
except:
print('valor ingresado no corresponde a un numero entero')
par_impar(number)
###Output
El numero es par
###Markdown
2.Realiza una funciรณn llamada area_rectangulo(base, altura) que devuelva el รกrea del rectangulo a partir de una base y una altura. Calcula el รกrea de un rectรกngulo de 15 de base y 10 de altura:
###Code
def area_rectangulo(base,altura):
return base * altura
area=area_rectangulo(15,10)
print(f'El รกrea del rectรกngulo es: {area}')
###Output
El รกrea del rectรกngulo es: 150
###Markdown
3.Realiza una funciรณn llamada relacion(a, b) que a partir de dos nรบmeros cumpla lo siguiente:- Si el primer nรบmero es mayor que el segundo, debe devolver 1.- Si el primer nรบmero es menor que el segundo, debe devolver -1.- Si ambos nรบmeros son iguales, debe devolver un 0.Comprueba la relaciรณn entre los nรบmeros: '5 y 10', '10 y 5' y '5 y 5'.
###Code
def rel(a, b):
if a > b:
return 1
elif a < b:
return -1
else:
return 0
print(rel(5,10))
print(rel(10,5))
print(rel(5,5))
###Output
-1
1
0
###Markdown
4.El factorial de un nรบmero corresponde al producto de todos los nรบmeros desde 1 hasta el propio nรบmero. Es el ejemplo con retorno mรกs utilizado para mostrar la utilidad de este tipo de funciones:- 3! = 1 x 2 x 3 = 6- 5! = 1 x 2 x 3 x 4 x 5 = 120
###Code
def factorial(numero):
f = 1
if numero == 0:
print(f'{numero}! = {f}')
elif numero > 0:
for i in range(1, numero+1):
f = f * i
print(f'{numero}! = {f}')
elif numero < 0:
print('El nรบmero ingresado no es un entero no negativo')
while True:
try:
numero = int(input('Ingrese el nรบmero factorial: '))
break
except:
print('El nรบmero ingresado no es un entero')
factorial(numero)
def factorial(num):
if num > 1:
num = num * factorial(num -1)
elif num==0:
num= 1
return num
num = int(input('Ingresa el número: ')) # factorial is defined for integers
c = factorial(num)
print(f'{c}')
###Output
Ingresa el nรบmero: 4
###Markdown
Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal.
###Code
def mi_funcion():
# aquรญ mi codigo
pass # uso del pass es opcional
###Output
_____no_output_____
###Markdown
Una funciรณn, no es ejecutada hasta tanto no sea invocada. Para invocar una funciรณn, simplemente se la llama por su nombre:
###Code
# defino mi funciรณn
def hola():
print("Hola Mundo")
# llamo mi funciรณn
hola()
###Output
Hola Mundo
###Markdown
Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable:
###Code
# Funciรณn retorna la palabra "Hola Mundo"
def funcion():
return "Hola Mundo"
# Almaceno el valor devuelto en una variable
frase = funcion()
print(frase)
###Output
Hola Mundo
###Markdown
Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno.
###Code
def mi_funcion(nombre, apellido):
# algoritmo
pass
###Output
_____no_output_____
###Markdown
Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder:
###Code
# Ejemplo funciรณn con parรกmetros
def mi_funcion(nombre, apellido):
nombre_completo = nombre, apellido
print(nombre_completo)
mi_funcion('gonzalo','delgado')
mi_funcion()
# Ejemplo 2
def suma(a, b): # valores que se reciben
return a + b
a = 5
b = 6
resultado = suma(a, b) # valores que se envรญan
print(resultado)
###Output
11
###Markdown
Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn
###Code
# Ejemplo3
def resta(a, b):
return a - b
# argumento 30 => posiciรณn 0 => parรกmetro a
# argumento 10 => posiciรณn 1 => parรกmetro b
resta(30, 10)
###Output
_____no_output_____
###Markdown
Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente:
###Code
resta(b=30, a=10)
###Output
_____no_output_____
###Markdown
Valores por Defecto Es posible colocar valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando
###Code
def bar(x=2):
x = x + 90
return x
# my_var = 3
print(bar())
# pasando un valor a mi funcion
print(bar(6))
###Output
96
###Markdown
Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores
###Code
def indeterminados_posicion(*args):
for arg in args:
print(arg)
indeterminados_posicion(5,"Hola",[1,2,3,4,5])
###Output
5
Hola
[1, 2, 3, 4, 5]
###Markdown
Cuando se tiene los valores en lista
###Code
# valores a ser sumados se encuentran en una lista
def sumar(a,b):
return a+b
# lista con valores a ser sumados
numeros_sumar=[23,11]
print(sumar(*numeros_sumar))
###Output
34
###Markdown
kwargs Cuando no se sabe la cantidad de valores
###Code
def indeterminados_nombre(**kwargs):
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5])
###Output
n => 5
c => Hola
l => [1, 2, 3, 4, 5]
###Markdown
Valores contenidos en diccionario
###Code
def calcular(importe, descuento):
return importe - (importe * descuento / 100)
datos = {
"descuento": 10,
"importe": 1500
}
print(calcular(**datos))
###Output
1350.0
###Markdown
Combinando ambos conceptos
###Code
def super_funcion(*args,**kwargs):
total = 0
for arg in args:
total += arg
print("sumatorio => ", total)
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
super_funcion(10, 50, -1, 1.56, 10, 20, 300, nombre="Hector", edad=27)
datos_persona ={
'nombre':'Gonzalo',
'edad': 26
}
"Hola {nombre}, tu edad es {edad}".format(**datos_persona)
###Output
_____no_output_____
###Markdown
Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia (Mutables): Listas, diccionarios, conjuntos... Paso por valor
###Code
# Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato
def bar(x):
x = x + 90
x = 3
bar(x)
print(x)
###Output
3
###Markdown
###Code
# Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos
def bar(x):
return x + 90
my_var = 3
my_var = bar(my_var)
print(my_var)
###Output
93
###Markdown
Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera:
###Code
# Valor puede
def foo(x):
x[0] = x[0] * 99
# lista original
my_list = [1, 2, 3]
foo(my_list)
my_list
###Output
_____no_output_____
###Markdown
###Code
# asi se genere una copia simple, esto no soluciona el problema
my_list2 = my_list
foo(my_list2)
my_list2
my_list
my_list.copy()
# se puede solucionar realizando una copia al objeto
foo(my_list.copy())
my_list
my_list
###Output
_____no_output_____
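###Markdown
`.copy()` makes a shallow copy: the outer list is new, but any mutable elements inside it (nested lists, dictionaries) are still shared. When that matters, `copy.deepcopy` duplicates the inner objects as well. A minimal sketch:
###Code
import copy

def modificar_interna(x):
    x[0][0] = 999   # mutates the first inner list

original = [[1, 2], [3, 4]]
superficial = original.copy()        # shallow copy: inner lists are shared
profunda = copy.deepcopy(original)   # deep copy: inner lists are duplicated too

modificar_interna(superficial)
print(original)   # the shared inner list was changed through the shallow copy
modificar_interna(profunda)
print(original)   # the deep copy protects the original this time
###Output
[[999, 2], [3, 4]]
[[999, 2], [3, 4]]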
###Markdown
รmbito de variables en funciones----------------------------------- Ejemplo
###Code
# valor de variable global 'x' se mantiene
x = 7
def foo():
x = 42
print(x)
# llamo a la funcion
foo()
print(x)
# Global indica que se va a trabar con variable global por lo que cuando redefinimos a la variable,
# se cambia el valor global de esta
x = 7
def foo():
global x
x = 42
print(x)
# llamo a la funcion
foo()
print(x)
###Output
42
42
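###Markdown
Without the `global` declaration, reading a global variable inside a function works fine, but assigning to it makes the name local for the whole function body, so reading it before that assignment raises `UnboundLocalError`. A minimal sketch (the variable `contador` is illustrative):
###Code
contador = 0

def leer():
    print(contador)              # reading a global needs no declaration

def incrementar_mal():
    # contador = contador + 1   # UnboundLocalError if uncommented
    pass

def incrementar_bien():
    global contador
    contador = contador + 1

leer()
incrementar_bien()
leer()
###Output
0
1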
###Markdown
Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo.
###Code
def jugar(intento=1):
respuesta = input("ยฟDe quรฉ color es una naranja? ")
if respuesta.lower() != "naranja":
if intento < 3:
print("\nFallaste! Intรฉntalo de nuevo")
intento += 1
jugar(intento) # Llamada recursiva
else:
print("\nPerdiste!")
else:
print("\nGanaste!")
jugar()
###Output
ยฟDe quรฉ color es una naranja? asdasd
###Markdown
Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar.
###Code
def par_o_impar():
numero = int(input("Escriba un nรบmero entero: "))
if 0 > numero:
print(f"Le he pedido un nรบmero entero mayor 0")
else:
if numero % 2 == 0:
print(f"El nรบmero {numero} es par.")
else:
print(f"El nรบmero {numero} es impar.")
par_o_impar()
def par_impar(number):
if (number % 2 == 0):
print("El numero es par")
else:
print("El numero es impar")
while True:
try:
number = int(input("Ingrese un numero: "))
break
except:
print('valor ingresado no corresponde a un numero entero:')
par_impar(number)
###Output
El numero es par
###Markdown
2.Realiza una funciรณn llamada area_rectangulo(base, altura) que devuelva el รกrea del rectangulo a partir de una base y una altura. Calcula el รกrea de un rectรกngulo de 15 de base y 10 de altura:
###Code
def area_rectangulo(base,altura):
return base * altura
area=area_rectangulo(15,10)
print(f'El รกrea del rectรกngulo es: {area}')
###Output
El รกrea del rectรกngulo es: 150
###Markdown
3.Realiza una funciรณn llamada relacion(a, b) que a partir de dos nรบmeros cumpla lo siguiente:- Si el primer nรบmero es mayor que el segundo, debe devolver 1.- Si el primer nรบmero es menor que el segundo, debe devolver -1.- Si ambos nรบmeros son iguales, debe devolver un 0.Comprueba la relaciรณn entre los nรบmeros: '5 y 10', '10 y 5' y '5 y 5'.
###Code
def rel(a, b):
if a > b:
return 1
elif a < b:
return -1
else:
return 0
print(rel(5,10))
print(rel(10,5))
print(rel(5,5))
###Output
-1
1
0
###Markdown
4.El factorial de un nรบmero corresponde al producto de todos los nรบmeros desde 1 hasta el propio nรบmero. Es el ejemplo con retorno mรกs utilizado para mostrar la utilidad de este tipo de funciones:- 3! = 1 x 2 x 3 = 6- 5! = 1 x 2 x 3 x 4 x 5 = 120
###Code
def factorial(numero):
f = 1
if numero == 0:
print(f'{numero}! = {f}')
elif numero > 0:
for i in range(1, numero+1):
f = f * i
print(f'{numero}! = {f}')
elif numero < 0:
print('El nรบmero ingresado no es un entero no negativo')
while True:
try:
numero = int(input('Ingrese el nรบmero factorial: '))
break
except:
print('El nรบmero ingresado no es un entero')
factorial(numero)
def factorial(num):
if num > 1:
num = num * factorial(num -1)
elif num==0:
num= 1
return num
num = float(input('Ingresa el nรบmero: '))
c = factorial(num)
print(f'{c}')
###Output
Ingresa el nรบmero: 12
479001600.0
###Markdown
Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal.
###Code
def mi_funcion():
# aquรญ mi codigo
pass # uso del pass es opcional
###Output
_____no_output_____
###Markdown
Una funciรณn, no es ejecutada hasta tanto no sea invocada. Para invocar una funciรณn, simplemente se la llama por su nombre:
###Code
# defino mi funciรณn
def hola():
print("Hola Mundo")
# llamo mi funciรณn
hola()
###Output
Hola Mundo
###Markdown
Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable:
###Code
# Funciรณn retorna la palabra "Hola Mundo"
def funcion():
return "Hola Mundo"
# Almaceno el valor devuelto en una variable
frase = funcion()
print(frase)
###Output
Hola Mundo
###Markdown
Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno.
###Code
def mi_funcion(nombre, apellido):
# algoritmo
pass
###Output
_____no_output_____
###Markdown
Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder:
###Code
# Ejemplo funciรณn con parรกmetros
def mi_funcion(nombre, apellido):
nombre_completo = nombre, apellido
print(nombre_completo)
mi_funcion('gonzalo','delgado')
mi_funcion()
# Ejemplo 2
def suma(a, b): # valores que se reciben
return a + b
a = 5
b = 6
resultado = suma(a, b) # valores que se envรญan
print(resultado)
###Output
11
###Markdown
Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn
###Code
# Ejemplo3
def resta(a, b):
return a - b
# argumento 30 => posiciรณn 0 => parรกmetro a
# argumento 10 => posiciรณn 1 => parรกmetro b
resta(30, 10)
###Output
_____no_output_____
###Markdown
Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente:
###Code
resta(b=30, a=10)
###Output
_____no_output_____
###Markdown
Valores por Defecto Es posible colocar valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando
###Code
def bar(x=2):
x = x + 90
return x
# my_var = 3
print(bar())
# pasando un valor a mi funcion
print(bar(6))
###Output
96
###Markdown
Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores
###Code
def indeterminados_posicion(*args):
for arg in args:
print(arg)
indeterminados_posicion(5,"Hola",[1,2,3,4,5])
###Output
5
Hola
[1, 2, 3, 4, 5]
###Markdown
Cuando se tiene los valores en lista
###Code
# valores a ser sumados se encuentran en una lista
def sumar(a,b):
return a+b
# lista con valores a ser sumados
numeros_sumar=[23,11]
print(sumar(*numeros_sumar))
###Output
34
###Markdown
kwargs Cuando no se sabe la cantidad de valores
###Code
def indeterminados_nombre(**kwargs):
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5])
###Output
n => 5
c => Hola
l => [1, 2, 3, 4, 5]
###Markdown
Valores contenidos en diccionario
###Code
def calcular(importe, descuento):
return importe - (importe * descuento / 100)
datos = {
"descuento": 10,
"importe": 1500
}
print(calcular(**datos))
###Output
1350.0
###Markdown
Combinando ambos conceptos
###Code
def super_funcion(*args,**kwargs):
total = 0
for arg in args:
total += arg
print("sumatorio => ", total)
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
super_funcion(10, 50, -1, 1.56, 10, 20, 300, nombre="Hector", edad=27)
datos_persona ={
'nombre':'Gonzalo',
'edad': 26
}
"Hola {nombre}, tu edad es {edad}".format(**datos_persona)
###Output
_____no_output_____
###Markdown
Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia (Mutables): Listas, diccionarios, conjuntos... Paso por valor
###Code
# Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato
def bar(x):
x = x + 90
x = 3
bar(x)
print(x)
###Output
3
###Markdown
###Code
# Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos
def bar(x):
return x + 90
my_var = 3
my_var = bar(my_var)
print(my_var)
###Output
93
###Markdown
Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera:
###Code
# Valor puede
def foo(x):
x[0] = x[0] * 99
# lista original
my_list = [1, 2, 3]
foo(my_list)
my_list
###Output
_____no_output_____
###Markdown
###Code
# asi se genere una copia simple, esto no soluciona el problema
my_list2 = my_list
foo(my_list2)
my_list2
my_list
# se puede solucionar realizando una copia al objeto
foo(my_list.copy())
my_list
###Output
_____no_output_____
###Markdown
รmbito de variables en funciones----------------------------------- Ejemplo
###Code
# valor de variable global 'x' se mantiene
x = 7
def foo():
x = 42
print(x)
# llamo a la funcion
foo()
print(x)
# Global indica que se va a trabar con variable global por lo que cuando redefinimos a la variable,
# se cambia el valor global de esta
x = 7
def foo():
global x
x = 42
print(x)
# llamo a la funcion
foo()
print(x)
###Output
42
42
###Markdown
Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo.
###Code
def jugar(intento=1):
respuesta = input("ยฟDe quรฉ color es una naranja? ")
if respuesta.lower() != "naranja":
if intento < 3:
print("\nFallaste! Intรฉntalo de nuevo")
intento += 1
jugar(intento) # Llamada recursiva
else:
print("\nPerdiste!")
else:
print("\nGanaste!")
jugar()
###Output
ยฟDe quรฉ color es una naranja? asdasd
###Markdown
Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar.
###Code
def par_o_impar(n):
if (n%2==0):
print("El nรบmero es par")
else:
print("El nรบmero es impar")
n=int(input("Ingrese un nรบmero"))
par_o_impar(n)
###Output
Ingrese un nรบmero 7
###Markdown
2.Realiza una funciรณn llamada area_rectangulo(base, altura) que devuelva el รกrea del rectangulo a partir de una base y una altura. Calcula el รกrea de un rectรกngulo de 15 de base y 10 de altura:
###Code
def area_rectangulo(base, altura):
return base*altura
print( area_rectangulo(15,10) )
###Output
150
###Markdown
3.Realiza una funciรณn llamada relacion(a, b) que a partir de dos nรบmeros cumpla lo siguiente:- Si el primer nรบmero es mayor que el segundo, debe devolver 1.- Si el primer nรบmero es menor que el segundo, debe devolver -1.- Si ambos nรบmeros son iguales, debe devolver un 0.Comprueba la relaciรณn entre los nรบmeros: '5 y 10', '10 y 5' y '5 y 5'.
###Code
def rel(a, b):
if a > b:
return 1
elif a < b:
return -1
else:
return 0
print(rel(5,10))
print(rel(10,5))
print(rel(5,5))
###Output
-1
1
0
###Markdown
4.El factorial de un nรบmero corresponde al producto de todos los nรบmeros desde 1 hasta el propio nรบmero. Es el ejemplo con retorno mรกs utilizado para mostrar la utilidad de este tipo de funciones:- 3! = 1 x 2 x 3 = 6- 5! = 1 x 2 x 3 x 4 x 5 = 120
###Code
def factorial(num):
if num > 1:
num = num * factorial(num -1)
elif num==0:
num= 1
return num
num = int(input('Ingresa el nรบmero: '))
c = factorial(num)
print(f'{c}')
###Output
Ingresa el nรบmero: 5
###Markdown
Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal.
###Code
def mi_funcion():
# aquรญ mi codigo
pass # uso del pass es opcional
###Output
_____no_output_____
###Markdown
Una funciรณn, no es ejecutada hasta tanto no sea invocada. Para invocar una funciรณn, simplemente se la llama por su nombre:
###Code
# defino mi funciรณn
def hola():
print("Hola Mundo")
# llamo mi funciรณn
hola()
###Output
Hola Mundo
###Markdown
Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable:
###Code
# Funciรณn retorna la palabra "Hola Mundo"
def funcion():
return "Hola Mundo"
# Almaceno el valor devuelto en una variable
frase = funcion()
print(frase)
###Output
Hola Mundo
###Markdown
Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno.
###Code
def mi_funcion(nombre, apellido):
# algoritmo
pass
###Output
_____no_output_____
###Markdown
Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder:
###Code
# Ejemplo funciรณn con parรกmetros
def mi_funcion(nombre, apellido):
nombre_completo = nombre, apellido
print(nombre_completo)
mi_funcion('gonzalo','delgado')
mi_funcion()
# Ejemplo 2
def suma(a, b): # valores que se reciben
return a + b
a = 5
b = 6
resultado = suma(a, b) # valores que se envรญan
print(resultado)
###Output
11
###Markdown
Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn
###Code
# Ejemplo3
def resta(a, b):
return a - b
# argumento 30 => posiciรณn 0 => parรกmetro a
# argumento 10 => posiciรณn 1 => parรกmetro b
resta(30, 10)
###Output
_____no_output_____
###Markdown
Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente:
###Code
resta(b=30, a=10)
###Output
_____no_output_____
###Markdown
Valores por Defecto Es posible colocar valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando
###Code
def bar(x=2):
x = x + 90
return x
# my_var = 3
print(bar())
# pasando un valor a mi funcion
print(bar(6))
###Output
96
###Markdown
Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores
###Code
def indeterminados_posicion(*args):
for arg in args:
print(arg)
indeterminados_posicion(5,"Hola",[1,2,3,4,5])
###Output
5
Hola
[1, 2, 3, 4, 5]
###Markdown
Cuando se tiene los valores en lista
###Code
# valores a ser sumados se encuentran en una lista
def sumar(a,b):
return a + b
# lista con valores a ser sumados
numeros_sumar = [23,11]
print(sumar(*numeros_sumar))
###Output
34
###Markdown
kwargs Cuando no se sabe la cantidad de valores
###Code
def indeterminados_nombre(**kwargs):
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5])
###Output
n => 5
c => Hola
l => [1, 2, 3, 4, 5]
###Markdown
Valores contenidos en diccionario
###Code
def calcular(importe, descuento):
return importe - (importe * descuento / 100)
datos = {
"descuento": 10,
"importe": 1500
}
print(calcular(**datos))
###Output
1350.0
###Markdown
Combinando ambos conceptos
###Code
def super_funcion(*args,**kwargs):
total = 0
for arg in args:
total += arg
print("sumatorio => ", total)
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
super_funcion(10, 50, -1, 1.56, 10, 20, 300, nombre="Hector", edad=27)
datos_persona ={
'nombre':'Gonzalo',
'edad': 26
}
"Hola {nombre}, tu edad es {edad}".format(**datos_persona)
###Output
_____no_output_____
###Markdown
Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia (Mutables): Listas, diccionarios, conjuntos... Paso por valor
###Code
# Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato
def bar(x):
x = x + 90
x = 3
bar(x)
print(x)
###Output
3
###Markdown
###Code
# Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos
def bar(x):
return x + 90
my_var = 3
my_var = bar(my_var)
print(my_var)
###Output
93
###Markdown
Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera:
###Code
# Valor puede
def foo(x):
x[0] = x[0] * 99
x.append(99)
# lista original
my_list = [1, 2, 3]
foo(my_list)
my_list
###Output
_____no_output_____
###Markdown
###Code
# asi se genere una copia simple, esto no soluciona el problema
my_list2 = my_list
foo(my_list2)
my_list2
my_list
my_list.copy()
# se puede solucionar realizando una copia al objeto
foo(my_list.copy())
my_list
my_list
###Output
_____no_output_____
###Markdown
รmbito de variables en funciones----------------------------------- Ejemplo
###Code
# valor de variable global 'x' se mantiene
x = 7
def foo():
x = 42
print(x)
# llamo a la funcion
foo()
print(x)
# Global indica que se va a trabar con variable global por lo que cuando redefinimos a la variable,
# se cambia el valor global de esta
x = 7
def foo():
global x
x = 42
print(x)
# llamo a la funcion
foo()
print(x)
###Output
42
42
###Markdown
Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo.
###Code
def jugar(intento=1):
respuesta = input("ยฟDe quรฉ color es una naranja? ")
if respuesta.lower() != "naranja":
if intento < 3:
print("\nFallaste! Intรฉntalo de nuevo")
intento += 1
jugar(intento) # Llamada recursiva
else:
print("\nPerdiste!")
else:
print("\nGanaste!")
jugar()
###Output
ยฟDe quรฉ color es una naranja? fdfsd
###Markdown
Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar.
###Code
def par_o_impar():
numero = int(input("Escriba un nรบmero entero: "))
if 0 > numero:
print(f"Le he pedido un nรบmero entero mayor 0")
else:
if numero % 2 == 0:
print(f"El nรบmero {numero} es par.")
else:
print(f"El nรบmero {numero} es impar.")
par_o_impar()
def par_impar(number):
if (number % 2 == 0):
print("El numero es par")
else:
print("El numero es impar")
while True:
try:
number = int(input("Ingrese un numero"))
break
except:
print('valor ingresado no corresponde a un numero entero')
par_impar(number)
###Output
El numero es par
###Markdown
2.Realiza una funciรณn llamada area_rectangulo(base, altura) que devuelva el รกrea del rectangulo a partir de una base y una altura. Calcula el รกrea de un rectรกngulo de 15 de base y 10 de altura:
###Code
def area_rectangulo(base,altura):
return base * altura
area=area_rectangulo(15,10)
print(f'El รกrea del rectรกngulo es: {area}')
###Output
El รกrea del rectรกngulo es: 150
###Markdown
3.Realiza una funciรณn llamada relacion(a, b) que a partir de dos nรบmeros cumpla lo siguiente:- Si el primer nรบmero es mayor que el segundo, debe devolver 1.- Si el primer nรบmero es menor que el segundo, debe devolver -1.- Si ambos nรบmeros son iguales, debe devolver un 0.Comprueba la relaciรณn entre los nรบmeros: '5 y 10', '10 y 5' y '5 y 5'.
###Code
def rel(a, b):
if a > b:
return 1
elif a < b:
return -1
else:
return 0
print(rel(5,10))
print(rel(10,5))
print(rel(5,5))
###Output
-1
1
0
###Markdown
4.El factorial de un nรบmero corresponde al producto de todos los nรบmeros desde 1 hasta el propio nรบmero. Es el ejemplo con retorno mรกs utilizado para mostrar la utilidad de este tipo de funciones:- 3! = 1 x 2 x 3 = 6- 5! = 1 x 2 x 3 x 4 x 5 = 120
###Code
def factorial(numero):
f = 1
if numero == 0:
print(f'{numero}! = {f}')
elif numero > 0:
for i in range(1, numero+1):
f = f * i
print(f'{numero}! = {f}')
elif numero < 0:
print('El nรบmero ingresado no es un entero no negativo')
while True:
try:
numero = int(input('Ingrese el nรบmero factorial: '))
break
except:
print('El nรบmero ingresado no es un entero')
factorial(numero)
def factorial(num):
if num > 1:
num = num * factorial(num -1)
elif num==0:
num= 1
return num
num = float(input('Ingresa el nรบmero: '))
c = factorial(num)
print(f'{c}')
###Output
Ingresa el nรบmero: 4
###Markdown
Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal.
###Code
def mi_funcion():
# aquรญ mi codigo
pass # uso del pass es opcional
###Output
_____no_output_____
###Markdown
Una funciรณn, no es ejecutada hasta tanto no sea invocada. Para invocar una funciรณn, simplemente se la llama por su nombre:
###Code
# defino mi funciรณn
def hola():
print("Hola Mundo")
# llamo mi funciรณn
hola()
###Output
Hola Mundo
###Markdown
Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable:
###Code
# Funciรณn retorna la palabra "Hola Mundo"
def funcion():
return "Hola Mundo"
# Almaceno el valor devuelto en una variable
frase = funcion()
print(frase)
###Output
Hola Mundo
###Markdown
Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno.
###Code
def mi_funcion(nombre, apellido):
# algoritmo
pass
###Output
_____no_output_____
###Markdown
Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder:
###Code
# Ejemplo funciรณn con parรกmetros
def mi_funcion(nombre, apellido):
nombre_completo = nombre, apellido
print(nombre_completo)
mi_funcion('gonzalo','delgado')
mi_funcion()
# Ejemplo 2
def suma(a, b): # valores que se reciben
return a + b
a = 5
b = 6
resultado = suma(a, b) # valores que se envรญan
print(resultado)
###Output
11
###Markdown
Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn
###Code
# Ejemplo3
def resta(a, b):
return a - b
# argumento 30 => posiciรณn 0 => parรกmetro a
# argumento 10 => posiciรณn 1 => parรกmetro b
resta(30, 10)
###Output
_____no_output_____
###Markdown
Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente:
###Code
resta(b=30, a=10)
###Output
_____no_output_____
###Markdown
Valores por Defecto Es posible colocar valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando
###Code
def bar(x=2):
x = x + 90
return x
# my_var = 3
print(bar())
# pasando un valor a mi funcion
print(bar(6))
###Output
96
###Markdown
Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores
###Code
def indeterminados_posicion(*args):
for arg in args:
print(arg)
indeterminados_posicion(5,"Hola",[1,2,3,4,5])
###Output
5
Hola
[1, 2, 3, 4, 5]
###Markdown
Cuando se tiene los valores en lista
###Code
# valores a ser sumados se encuentran en una lista
def sumar(a,b):
return a+b
# lista con valores a ser sumados
numeros_sumar=[23,11]
print(sumar(*numeros_sumar))
###Output
34
###Markdown
kwargs Cuando no se sabe la cantidad de valores
###Code
def indeterminados_nombre(**kwargs):
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5])
###Output
n => 5
c => Hola
l => [1, 2, 3, 4, 5]
###Markdown
Valores contenidos en diccionario
###Code
def calcular(importe, descuento):
return importe - (importe * descuento / 100)
datos = {
"descuento": 10,
"importe": 1500
}
print(calcular(**datos))
###Output
1350.0
###Markdown
Combinando ambos conceptos
###Code
def super_funcion(*args,**kwargs):
total = 0
for arg in args:
total += arg
print("sumatorio => ", total)
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
super_funcion(10, 50, -1, 1.56, 10, 20, 300, nombre="Hector", edad=27)
datos_persona ={
'nombre':'Gonzalo',
'edad': 26
}
"Hola {nombre}, tu edad es {edad}".format(**datos_persona)
###Output
_____no_output_____
###Markdown
Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia (Mutables): Listas, diccionarios, conjuntos... Paso por valor
###Code
# Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato
def bar(x):
x = x + 90
x = 3
bar(x)
print(x)
###Output
3
###Markdown
###Code
# Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos
def bar(x):
return x + 90
my_var = 3
my_var = bar(my_var)
print(my_var)
###Output
93
###Markdown
Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera:
###Code
# Valor puede
def foo(x):
x[0] = x[0] * 99
# lista original
my_list = [1, 2, 3]
foo(my_list)
my_list
###Output
_____no_output_____
###Markdown
###Code
# asi se genere una copia simple, esto no soluciona el problema
my_list2 = my_list
foo(my_list2)
my_list2
my_list
# se puede solucionar realizando una copia al objeto
foo(my_list.copy())
my_list
###Output
_____no_output_____
###Markdown
รmbito de variables en funciones----------------------------------- Ejemplo
###Code
# valor de variable global 'x' se mantiene
x = 7
def foo():
x = 42
print(x)
# llamo a la funcion
foo()
print(x)
# Global indica que se va a trabar con variable global por lo que cuando redefinimos a la variable,
# se cambia el valor global de esta
x = 7
def foo():
global x
x = 42
print(x)
# llamo a la funcion
foo()
print(x)
###Output
42
42
###Markdown
Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo.
###Code
def jugar(intento=1):
respuesta = input("ยฟDe quรฉ color es una naranja? ")
if respuesta.lower() != "naranja":
if intento < 3:
print("\nFallaste! Intรฉntalo de nuevo")
intento += 1
jugar(intento) # Llamada recursiva
else:
print("\nPerdiste!")
else:
print("\nGanaste!")
jugar()
###Output
ยฟDe quรฉ color es una naranja? asdasd
###Markdown
Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar.
###Code
def poi(a):
if (a % 2 == 0):
print(f'El nรบmero {a} es par')
else:
print(f'El nรบmero {a} es impar')
a = int(input('Insertar un nรบmero: '))
poi(a)
###Output
Insertar un nรบmero: 2
###Markdown
2.Realiza una funciรณn llamada area_rectangulo(base, altura) que devuelva el รกrea del rectangulo a partir de una base y una altura. Calcula el รกrea de un rectรกngulo de 15 de base y 10 de altura:
###Code
def area_rectangulo(base, altura):
return base * altura
area = area_rectangulo(15, 10)
print(f'El รกrea del rectรกngulo es: {area}')
###Output
El รกrea del rectรกngulo es: 150
###Markdown
3.Realiza una funciรณn llamada relacion(a, b) que a partir de dos nรบmeros cumpla lo siguiente:- Si el primer nรบmero es mayor que el segundo, debe devolver 1.- Si el primer nรบmero es menor que el segundo, debe devolver -1.- Si ambos nรบmeros son iguales, debe devolver un 0.Comprueba la relaciรณn entre los nรบmeros: '5 y 10', '10 y 5' y '5 y 5'.
###Code
def relacion(a,b):
if a > b:
return 1
    elif a < b:
return -1
else:
return 0
a = float(input('Ingresa el primer nรบmero: '))
b = float(input('Ingresa el segundo nรบmero: '))
c = relacion(a,b)
print(f'{c}')
###Output
Ingresa el primer nรบmero: 5
Ingresa el segundo nรบmero: 2
###Markdown
4.El factorial de un nรบmero corresponde al producto de todos los nรบmeros desde 1 hasta el propio nรบmero. Es el ejemplo con retorno mรกs utilizado para mostrar la utilidad de este tipo de funciones:- 3! = 1 x 2 x 3 = 6- 5! = 1 x 2 x 3 x 4 x 5 = 120
###Code
def factorial(numero):
f = 1
if numero == 0:
print(f'{numero}! = {f}')
elif numero > 0:
for i in range(1, numero+1):
f = f * i
print(f'{numero}! = {f}')
elif numero < 0:
print('El nรบmero ingresado no es un entero no negativo')
while True:
try:
numero = int(input('Ingrese el nรบmero factorial: '))
break
except:
print('El nรบmero ingresado no es un entero')
factorial(numero)
###Output
Ingrese el nรบmero factorial: 5
###Markdown
Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal.
###Code
def mi_funcion():
# aquรญ mi codigo
pass # uso del pass es opcional
    raise NotImplementedError  # built-in placeholder exception (raised, not returned)
###Output
_____no_output_____
###Markdown
Una funciรณn, no es ejecutada hasta tanto no sea invocada. Para invocar una funciรณn, simplemente se la llama por su nombre:
###Code
# defino mi funciรณn
def hola():
print("Hola Mundo")
# llamo mi funciรณn
hola()
###Output
Hola Mundo
###Markdown
Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable:
###Code
# Funciรณn retorna la palabra "Hola Mundo"
def funcion():
return "Hola Mundo"
# Almaceno el valor devuelto en una variable
frase = funcion()
print(frase)
###Output
Hola Mundo
###Markdown
Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno.
###Code
def mi_funcion(nombre, apellido):
# algoritmo
pass
###Output
_____no_output_____
###Markdown
Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder:
###Code
# Ejemplo funciรณn con parรกmetros
def mi_funcion(nombre, apellido):
nombre_completo = nombre+ ' ' + apellido
print(nombre_completo)
mi_funcion('gonzalo','delgado')
mi_funcion()
# Ejemplo 2
def suma(a, b): # valores que se reciben
return a + b
a = 5
b = 6
resultado = suma(a, b) # valores que se envรญan
print(resultado)
###Output
11
###Markdown
Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn
###Code
# Ejemplo3
def resta(a, b):
return a - b
# argumento 30 => posiciรณn 0 => parรกmetro a
# argumento 10 => posiciรณn 1 => parรกmetro b
resta(30, 10)
###Output
_____no_output_____
###Markdown
Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente:
###Code
resta(b=100, a=10)
###Output
_____no_output_____
###Markdown
Valores por Defecto Es posible colocar valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando
###Code
def bar(x=2):
return x + 90
# my_var = 3
print(bar())
# pasando un valor a mi funcion
print(bar(6))
###Output
96
###Markdown
Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores
###Code
def indeterminados_posicion(*args):
for arg in args:
print(arg)
indeterminados_posicion(5,"Hola",[1,2,3,4,5],{'dia':'sabado'})
###Output
5
Hola
[1, 2, 3, 4, 5]
{'dia': 'sabado'}
###Markdown
Cuando se tiene los valores en lista
###Code
# valores a ser sumados se encuentran en una lista
def sumar(a,b):
return a+b
# lista con valores a ser sumados
numeros_sumar=[23,11]
print(sumar(*numeros_sumar))
sumar(numeros_sumar[0],numeros_sumar[1])
###Output
34
###Markdown
kwargs Cuando no se sabe la cantidad de valores
###Code
def indeterminados_nombre(**kwargs):
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5])
###Output
n => 5
c => Hola
l => [1, 2, 3, 4, 5]
###Markdown
Valores contenidos en diccionario
###Code
def calcular(importe, descuento):
return importe * (1 - descuento / 100)
datos = {
"descuento": 10,
"importe": 1500
}
print(calcular(**datos))
calcular(datos['importe'], datos['descuento'])
###Output
_____no_output_____
###Markdown
Combinando ambos conceptos
###Code
def super_funcion(*args,**kwargs):
total = 0
for arg in args:
total += arg
print("sumatorio => ", total)
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
super_funcion(10, 50, -1, 1.56, 10, 20, 300,*[20,30], nombre="Hector", edad=27,**{'sueldo':1200})
datos_persona ={
'nombre':'Gonzalo',
'edad': 26
}
"Hola {nombre}, tu edad es {edad}".format(**datos_persona)
datos_lista =['Gonzalo', 26]
"Hola {0}, tu edad es {1}".format(*datos_lista)
###Output
_____no_output_____
###Markdown
Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia (Mutables): Listas, diccionarios, conjuntos... Paso por valor
###Code
# Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato
def bar(x):
x = x + 90
#print('valor de x dentro de la funcion "bar" es ', x)
return x
x = 3
bar(x)
print('valor de x a nivel global es ', x)
x = bar(x)
print(x)
###Output
93
###Markdown
###Code
# Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos
def bar(x):
return x + 90
my_var = 3
my_var = bar(my_var)
print(my_var)
my_var
###Output
_____no_output_____
###Markdown
Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera:
###Code
# Valor puede
def foo(x : list):
x[0] = x[0] * 99
# lista original
my_list = [1, 2, 3]
foo(my_list)
my_list
###Output
_____no_output_____
###Markdown
###Code
# asi se genere una copia simple, esto no soluciona el problema
my_list2 = my_list
foo(my_list2)
my_list2
my_list
# se puede solucionar realizando una copia al objeto
my_list = [1, 2, 3]
my_list2 = my_list.copy()
foo(my_list2)
my_list
my_list2
###Output
_____no_output_____
###Markdown
รmbito de variables en funciones----------------------------------- Ejemplo
###Code
# valor de variable global 'x' se mantiene
x = 7
def foo():
x = 42
print(x)
# llamo a la funcion
foo()
print(x)
# Global indica que se va a trabar con variable global por lo que cuando redefinimos a la variable,
# se cambia el valor global de esta
def foo():
global x
x = 42 # reasignacion total x
print('valor de x final', x)
return x
# llamo a la funcion
x = 7
foo()
print(x)
###Output
valor de x final 42
42
###Markdown
Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo.
###Code
def jugar(intento : int =1 ):
respuesta = input("ยฟDe quรฉ color es una naranja? ")
if respuesta.lower() != "naranja":
if intento < 3:
print("\nFallaste! Intรฉntalo de nuevo")
intento += 1
jugar(intento) # Llamada recursiva
else:
print("\nPerdiste!")
else:
print("\nGanaste!")
jugar()
###Output
ยฟDe quรฉ color es una naranja? rojo
###Markdown
Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar.
###Code
num = 3
def numero_par(num):
if num % 2 == 0:
print(f'El numero es par {num}')
else:
print(f'el numero es impar {num}')
# numero es par o impar?
numero_par(num)
###Output
el numero es impar 3
###Markdown
Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal.
###Code
def mi_funcion():
# aquรญ mi codigo
pass # uso del pass es opcional
###Output
_____no_output_____
###Markdown
Una funciรณn, no es ejecutada hasta tanto no sea invocada. Para invocar una funciรณn, simplemente se la llama por su nombre:
###Code
# defino mi funciรณn
def hola():
print("Hola Mundo")
# llamo mi funciรณn
hola()
###Output
Hola Mundo
###Markdown
Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable:
###Code
# Funciรณn retorna la palabra "Hola Mundo"
def funcion():
return "Hola Mundo"
# Almaceno el valor devuelto en una variable
frase = funcion()
print(frase)
###Output
Hola Mundo
###Markdown
Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno.
###Code
def mi_funcion(nombre, apellido):
# algoritmo
pass
###Output
_____no_output_____
###Markdown
Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder:
###Code
# Ejemplo funciรณn con parรกmetros
def mi_funcion(nombre, apellido):
nombre_completo = nombre, apellido
print(nombre_completo)
mi_funcion('gonzalo','delgado')
mi_funcion()
# Ejemplo 2
def suma(a, b): # valores que se reciben
return a + b
a = 5
b = 6
resultado = suma(a, b) # valores que se envรญan
print(resultado)
###Output
11
###Markdown
Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn
###Code
# Ejemplo3
def resta(a, b):
return a - b
# argumento 30 => posiciรณn 0 => parรกmetro a
# argumento 10 => posiciรณn 1 => parรกmetro b
resta(30, 10)
###Output
_____no_output_____
###Markdown
Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente:
###Code
resta(b=30, a=10)
###Output
_____no_output_____
###Markdown
Valores por Defecto Es posible colocar valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando
###Code
def bar(x=2):
x = x + 90
return x
# my_var = 3
print(bar())
# pasando un valor a mi funcion
print(bar(6))
###Output
96
###Markdown
Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores
###Code
def indeterminados_posicion(*args):
for arg in args:
print(arg)
indeterminados_posicion(5,"Hola",[1,2,3,4,5])
###Output
5
Hola
[1, 2, 3, 4, 5]
###Markdown
Cuando se tiene los valores en lista
###Code
# valores a ser sumados se encuentran en una lista
def sumar(a,b):
return a+b
# lista con valores a ser sumados
numeros_sumar=[23,11]
print(sumar(*numeros_sumar))
###Output
34
###Markdown
kwargs Cuando no se sabe la cantidad de valores
###Code
def indeterminados_nombre(**kwargs):
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5])
###Output
n => 5
c => Hola
l => [1, 2, 3, 4, 5]
###Markdown
Valores contenidos en diccionario
###Code
def calcular(importe, descuento):
return importe - (importe * descuento / 100)
datos = {
"descuento": 10,
"importe": 1500
}
print(calcular(**datos))
###Output
1350.0
###Markdown
Combinando ambos conceptos
###Code
def super_funcion(*args,**kwargs):
total = 0
for arg in args:
total += arg
print("sumatorio => ", total)
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
super_funcion(10, 50, -1, 1.56, 10, 20, 300, nombre="Hector", edad=27)
datos_persona ={
'nombre':'Gonzalo',
'edad': 26
}
"Hola {nombre}, tu edad es {edad}".format(**datos_persona)
###Output
_____no_output_____
###Markdown
Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia (Mutables): Listas, diccionarios, conjuntos... Paso por valor
###Code
# Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato
def bar(x):
x = x + 90
x = 3
bar(x)
print(x)
###Output
3
###Markdown
###Code
# Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos
def bar(x):
return x + 90
my_var = 3
my_var = bar(my_var)
print(my_var)
###Output
93
###Markdown
Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera:
###Code
# Valor puede
def foo(x):
x[0] = x[0] * 99
# lista original
my_list = [1, 2, 3]
foo(my_list)
my_list
###Output
_____no_output_____
###Markdown
###Code
# asi se genere una copia simple, esto no soluciona el problema
my_list2 = my_list
foo(my_list2)
my_list2
my_list
# se puede solucionar realizando una copia al objeto
foo(my_list.copy())
my_list
###Output
_____no_output_____
###Markdown
รmbito de variables en funciones----------------------------------- Ejemplo
###Code
# valor de variable global 'x' se mantiene
x = 7
def foo():
x = 42
print(x)
# llamo a la funcion
foo()
print(x)
# Global indica que se va a trabar con variable global por lo que cuando redefinimos a la variable,
# se cambia el valor global de esta
x = 7
def foo():
global x
x = 42
print(x)
# llamo a la funcion
foo()
print(x)
###Output
42
42
###Markdown
Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo.
###Code
def jugar(intento=1):
respuesta = input("ยฟDe quรฉ color es una naranja? ")
if respuesta.lower() != "naranja":
if intento < 3:
print("\nFallaste! Intรฉntalo de nuevo")
intento += 1
jugar(intento) # Llamada recursiva
else:
print("\nPerdiste!")
else:
print("\nGanaste!")
jugar()
###Output
ยฟDe quรฉ color es una naranja? asdasd
###Markdown
Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar.
###Code
def es_par(numero):
if numero%2==0:
print("es un numero par")
else:
print("es un numero impar")
numero = int(input('ingrese un numero: '))
es_par(numero)
###Output
es un numero par
###Markdown
2.Realiza una funciรณn llamada area_rectangulo(base, altura) que devuelva el รกrea del rectangulo a partir de una base y una altura. Calcula el รกrea de un rectรกngulo de 15 de base y 10 de altura:
###Code
def area_rectangulo(base, altura):
return base * altura
base = 15
altura = 10
area_rectangulo(base, altura)
###Output
_____no_output_____
###Markdown
Crear una funcion para sumar elementos de una lista
###Code
list_n = [20, 10, 30, 40, 50 ,60]
suma = 0
for n in list_n:
suma = suma + n
suma
###Output
_____no_output_____
###Markdown
en funcion
###Code
def sumar_elemetos_lista(lista_numeros):
suma = 0
for n in lista_numeros:
suma = suma + n
return suma
list_n = [20, 10, 30, 40, 50 ,60]
sumar_elemetos_lista(list_n)
###Output
_____no_output_____
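###Markdown
The same result can be obtained with Python's built-in `sum`, which is the idiomatic way to add up the elements of a list:
###Code
list_n = [20, 10, 30, 40, 50, 60]
print(sum(list_n))   # built-in equivalent of the loop above
###Output
210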
###Markdown
3.Realiza una funciรณn llamada relacion(a, b) que a partir de dos nรบmeros cumpla lo siguiente:- Si el primer nรบmero es mayor que el segundo, debe devolver 1.- Si el primer nรบmero es menor que el segundo, debe devolver -1.- Si ambos nรบmeros son iguales, debe devolver un 0.Comprueba la relaciรณn entre los nรบmeros: '5 y 10', '10 y 5' y '5 y 5'.
###Code
list_n = [20, 10, 30, 40, 50 ,60]
###Output
_____no_output_____
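###Markdown
The cell above only defines a list and does not implement the requested `relacion(a, b)`; a minimal sketch of what the exercise asks for:
###Code
def relacion(a, b):
    if a > b:
        return 1
    elif a < b:
        return -1
    else:
        return 0

print(relacion(5, 10), relacion(10, 5), relacion(5, 5))
###Output
-1 1 0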
###Markdown
4.El factorial de un nรบmero corresponde al producto de todos los nรบmeros desde 1 hasta el propio nรบmero. Es el ejemplo con retorno mรกs utilizado para mostrar la utilidad de este tipo de funciones:- 3! = 1 x 2 x 3 = 6- 5! = 1 x 2 x 3 x 4 x 5 = 120
###Code
numero = 5
factorial = 1
for n in range(numero):
factorial = factorial * (n+1)
print(f'factorial de {n+1} es {factorial}')
factorial
###Output
factorial de 1 es 1
factorial de 2 es 2
factorial de 3 es 6
factorial de 4 es 24
factorial de 5 es 120
###Markdown
creando funcion
###Code
def factorial_n(numero):
factorial = 1
for n in range(numero):
factorial = factorial * (n+1)
print(f'factorial de {n+1} es {factorial}')
return factorial
factorial_n(5)
###Output
factorial de 1 es 1
factorial de 2 es 2
factorial de 3 es 6
factorial de 4 es 24
factorial de 5 es 120
###Markdown
Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal.
###Code
def mi_funcion():
# aquรญ mi codigo
pass # uso del pass es opcional
###Output
_____no_output_____
###Markdown
Una funciรณn, no es ejecutada hasta tanto no sea invocada. Para invocar una funciรณn, simplemente se la llama por su nombre:
###Code
# defino mi funciรณn
def hola():
print("Hola Mundo")
# llamo mi funciรณn
hola()
###Output
Hola Mundo
###Markdown
Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable:
###Code
# Funciรณn retorna la palabra "Hola Mundo"
def funcion():
return "Hola Mundo"
# Almaceno el valor devuelto en una variable
frase = funcion()
print(frase)
###Output
Hola Mundo
###Markdown
Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno.
###Code
def mi_funcion(nombre, apellido):
# algoritmo
pass
###Output
_____no_output_____
###Markdown
Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder:
###Code
# Ejemplo funciรณn con parรกmetros
def mi_funcion(nombre, apellido):
nombre_completo = nombre, apellido
print(nombre_completo)
mi_funcion('gonzalo','delgado')
mi_funcion()
# Ejemplo 2
def suma(a, b): # valores que se reciben
return a + b
a = 5
b = 6
resultado = suma(a, b) # valores que se envรญan
print(resultado)
###Output
11
###Markdown
Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn
###Code
# Ejemplo3
def resta(a, b):
return a - b
# argumento 30 => posiciรณn 0 => parรกmetro a
# argumento 10 => posiciรณn 1 => parรกmetro b
resta(30, 10)
###Output
_____no_output_____
###Markdown
Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente:
###Code
resta(b=30, a=10)
###Output
_____no_output_____
###Markdown
Valores por Defecto Es posible colocar valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando
###Code
def bar(x=2):
x = x + 90
return x
# my_var = 3
print(bar())
# pasando un valor a mi funcion
print(bar(6))
###Output
96
###Markdown
Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores
###Code
def indeterminados_posicion(*args):
for arg in args:
print(arg)
indeterminados_posicion(5,"Hola",[1,2,3,4,5])
###Output
5
Hola
[1, 2, 3, 4, 5]
###Markdown
Cuando se tiene los valores en lista
###Code
# valores a ser sumados se encuentran en una lista
def sumar(a,b):
return a+b
# lista con valores a ser sumados
numeros_sumar=[23,11]
print(sumar(*numeros_sumar))
###Output
34
###Markdown
kwargs Cuando no se sabe la cantidad de valores
###Code
def indeterminados_nombre(**kwargs):
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5])
###Output
n => 5
c => Hola
l => [1, 2, 3, 4, 5]
###Markdown
Valores contenidos en diccionario
###Code
def calcular(importe, descuento):
return importe - (importe * descuento / 100)
datos = {
"descuento": 10,
"importe": 1500
}
print(calcular(**datos))
###Output
1350.0
###Markdown
Combinando ambos conceptos
###Code
def super_funcion(*args,**kwargs):
total = 0
for arg in args:
total += arg
print("sumatorio => ", total)
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
super_funcion(10, 50, -1, 1.56, 10, 20, 300, nombre="Hector", edad=27)
datos_persona ={
'nombre':'Gonzalo',
'edad': 26
}
"Hola {nombre}, tu edad es {edad}".format(**datos_persona)
###Output
_____no_output_____
###Markdown
Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia (Mutables): Listas, diccionarios, conjuntos... Paso por valor
###Code
# Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato
def bar(x):
x = x + 90
x = 3
bar(x)
print(x)
###Output
3
###Markdown
###Code
# Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos
def bar(x):
return x + 90
my_var = 3
my_var = bar(my_var)
print(my_var)
###Output
93
###Markdown
Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera:
###Code
# Valor puede
def foo(x):
x[0] = x[0] * 99
# lista original
my_list = [1, 2, 3]
foo(my_list)
my_list
###Output
_____no_output_____
###Markdown
###Code
# asi se genere una copia simple, esto no soluciona el problema
my_list2 = my_list
foo(my_list2)
my_list2
my_list
# se puede solucionar realizando una copia al objeto
foo(my_list.copy())
my_list
###Output
_____no_output_____
###Markdown
รmbito de variables en funciones----------------------------------- Ejemplo
###Code
# valor de variable global 'x' se mantiene
x = 7
def foo():
x = 42
print(x)
# llamo a la funcion
foo()
print(x)
# Global indica que se va a trabar con variable global por lo que cuando redefinimos a la variable,
# se cambia el valor global de esta
x = 7
def foo():
global x
x = 42
print(x)
# llamo a la funcion
foo()
print(x)
###Output
42
42
###Markdown
Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo.
###Code
def jugar(intento=1):
respuesta = input("ยฟDe quรฉ color es una naranja? ")
if respuesta.lower() != "naranja":
if intento < 3:
print("\nFallaste! Intรฉntalo de nuevo")
intento += 1
jugar(intento) # Llamada recursiva
else:
print("\nPerdiste!")
else:
print("\nGanaste!")
jugar()
###Output
ยฟDe quรฉ color es una naranja? asdasd
###Markdown
Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar.
###Code
num = input("Introduce un nรบmero: ")
num = int(num)
if num == 0:
print ("Este nรบmero es par.")
elif num%2 == 0:
print ("Este numero es par")
else:
print ("Este numero es impar")
###Output
_____no_output_____
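###Markdown
The statement asks for a function that receives the number as a parameter; a minimal sketch of the same check wrapped in one (the name `es_par_o_impar` is illustrative):
###Code
def es_par_o_impar(num):
    if num % 2 == 0:
        return "Este numero es par"
    return "Este numero es impar"

print(es_par_o_impar(4))
print(es_par_o_impar(7))
###Output
Este numero es par
Este numero es impar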
###Markdown
2.Realiza una funciรณn llamada area_rectangulo(base, altura) que devuelva el รกrea del rectangulo a partir de una base y una altura. Calcula el รกrea de un rectรกngulo de 15 de base y 10 de altura:
###Code
def area_rec(b, h):
return b*h
print(area_rec(15,10))
###Output
150
###Markdown
3.Realiza una funciรณn llamada relacion(a, b) que a partir de dos nรบmeros cumpla lo siguiente:- Si el primer nรบmero es mayor que el segundo, debe devolver 1.- Si el primer nรบmero es menor que el segundo, debe devolver -1.- Si ambos nรบmeros son iguales, debe devolver un 0.Comprueba la relaciรณn entre los nรบmeros: '5 y 10', '10 y 5' y '5 y 5'.
###Code
def rel(a, b):
if a > b:
return 1
elif a < b:
return -1
else:
return 0
print(rel(5,10))
print(rel(10,5))
print(rel(5,5))
def relacion(a,b):
if a > b:
return 1
elif a < b:
return -1
elif a == b:
return 0
a = float(input('Ingresa el primer nรบmero: '))
b = float(input('Ingresa el segundo nรบmero: '))
c = relacion(a,b)
print(f'{c}')
###Output
Ingresa el primer nรบmero: 5
Ingresa el segundo nรบmero: 10
###Markdown
4.El factorial de un nรบmero corresponde al producto de todos los nรบmeros desde 1 hasta el propio nรบmero. Es el ejemplo con retorno mรกs utilizado para mostrar la utilidad de este tipo de funciones:- 3! = 1 x 2 x 3 = 6- 5! = 1 x 2 x 3 x 4 x 5 = 120
###Code
def factorial(num):
if num > 1:
num = num * factorial(num -1)
return num
num = float(input('Ingresa el nรบmero: '))
c = factorial(num)
print(f'{c}')
###Output
Ingresa el nรบmero: 5
###Markdown
Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal.
###Code
def mi_funcion():
# aquรญ mi codigo
#pass # uso del pass es opcional
raise NotImplementedError  # 'NoImplementError' does not exist; the built-in name is NotImplementedError
###Output
_____no_output_____
###Markdown
Una funciรณn, no es ejecutada hasta tanto no sea invocada. Para invocar una funciรณn, simplemente se la llama por su nombre:
###Code
# defino mi funciรณn
def hola():
print("Hola Mundo")
# llamo mi funciรณn
hola()
###Output
Hola Mundo
###Markdown
Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable:
###Code
# Funciรณn retorna la palabra "Hola Mundo"
def funcion():
return "Hola Mundo"
# Almaceno el valor devuelto en una variable
frase = funcion()
print(frase)
###Output
Hola Mundo
###Markdown
Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno.
###Code
def mi_funcion(nombre, apellido):
# algoritmo
pass
###Output
_____no_output_____
###Markdown
Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder:
###Code
# Ejemplo funciรณn con parรกmetros
def mi_funcion(nombre, apellido):
nombre_completo = "Bienvenido {} {}".format(nombre , apellido)
print(nombre_completo)
mi_funcion('gonzalo','delgado')
mi_funcion()  # raises TypeError: mi_funcion() missing 2 required positional arguments: 'nombre' and 'apellido'
# Ejemplo 2
def suma(numero1, numero2): # valores que se reciben
return numero1 + numero2
a = 5
b = 6
resultado = suma(a, b) # valores que se envรญan
print(resultado)
###Output
11
###Markdown
Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn
###Code
# Ejemplo3
def resta(a, b):
return a - b
# argumento 30 => posiciรณn 0 => parรกmetro a
# argumento 10 => posiciรณn 1 => parรกmetro b
resta(30, 10)
###Output
_____no_output_____
###Markdown
Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente:
###Code
print(resta(100,10)) # 100 - 10
resta(b=100, a=10) # 10 - 100 = -90
###Output
90
###Markdown
Valores por Defecto Es posible colocar valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando
###Code
def bar(x=2):
return x + 90
# my_var = 3
print(bar())
# pasando un valor a mi funcion
print(bar(6))
###Output
96
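###Markdown
An added cautionary sketch (not from the original notebook): default values are evaluated only once, when the function is defined, so using a mutable object such as a list as a default is a common pitfall; the usual idiom is to default to None and create the list inside the function.
###Code
def agregar_mal(valor, destino=[]):      # the same list object is reused on every call
    destino.append(valor)
    return destino

def agregar_bien(valor, destino=None):   # None as a sentinel: a fresh list per call
    if destino is None:
        destino = []
    destino.append(valor)
    return destino

print(agregar_mal(1))    # [1]
print(agregar_mal(2))    # [1, 2]  <- the default list kept the previous value
print(agregar_bien(1))   # [1]
print(agregar_bien(2))   # [2]
###Output
_____no_output_____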
###Markdown
Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores
###Code
def sumar(a,b):
return a +b
def suma_indeterminado(*args):
sumatoria = 0
for arg in args:
sumatoria += arg
return sumatoria
listado_numeros = [23, 12,2,5,7]
suma_indeterminado(23, 12,2,5,7)
suma_indeterminado(*listado_numeros)
def indeterminados_posicion(*args):
for arg in args:
print(arg)
indeterminados_posicion(5,"Hola",[1,2,3,4,5],{'dia':'sabado'})
###Output
5
Hola
[1, 2, 3, 4, 5]
{'dia': 'sabado'}
###Markdown
Cuando se tiene los valores en lista
###Code
# valores a ser sumados se encuentran en una lista
def sumar(a,b):
return a+b
# lista con valores a ser sumados
numeros_sumar=[23,11]
print(sumar(*numeros_sumar))
sumar(numeros_sumar[0],numeros_sumar[1])
###Output
34
###Markdown
kwargs Cuando no se sabe la cantidad de valores
###Code
def indeterminados_nombre(**kwargs):
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5])
###Output
n => 5
c => Hola
l => [1, 2, 3, 4, 5]
###Markdown
Valores contenidos en diccionario
###Code
def calcular(importe, descuento):
return importe * (1 - descuento / 100)
datos = {
"descuento": 10,
"importe": 1500
}
print(calcular(**datos))
calcular(datos['importe'], datos['descuento'])
###Output
_____no_output_____
###Markdown
Combinando ambos conceptos
###Code
def super_funcion(*args,**kwargs):
total = 0
for arg in args:
total += arg
print("sumatorio => ", total)
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
super_funcion(10, 50, -1, 1.56, 10, 20, 300,*[20,30], nombre="Hector", edad=27,**{'sueldo':1200})
datos_persona ={
'nombre':'Gonzalo',
'edad': 26
}
"Hola {nombre}, tu edad es {edad}".format(**datos_persona)
datos_lista =['Gonzalo', 26]
"Hola {0}, tu edad es {1}".format(*datos_lista)
###Output
_____no_output_____
###Markdown
Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia (Mutables): Listas, diccionarios, conjuntos... Paso por valor
###Code
# Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato
def bar(x):
x = x + 90
#print('valor de x dentro de la funcion "bar" es ', x)
return x
x = 3
x = bar(x)
print('valor de x a nivel global es ', x)
x = bar(x)
print(x)
###Output
183
###Markdown
###Code
# Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos
def bar(x):
return x + 90
my_var = 3
my_var = bar(my_var)
print(my_var)
my_var
###Output
_____no_output_____
###Markdown
Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera:
###Code
# Valor puede
def foo(x : list):
x[0] = x[0] * 99
# lista original
my_list = [1, 2, 3]
foo(my_list)
my_list
###Output
_____no_output_____
###Markdown
###Code
# asi se genere una copia simple, esto no soluciona el problema
my_list2 = my_list
foo(my_list2)
my_list2
my_list
# se puede solucionar realizando una copia al objeto
my_list = [1, 2, 3]
my_list2 = my_list.copy()
foo(my_list2)
my_list
my_list2
# Valor puede
def foo2(x : list):
x[0] = x[0] * 99
return x
# lista original
my_list = [1, 2, 3]
lista_y = foo2(my_list.copy())
my_list
lista_y
###Output
_____no_output_____
###Markdown
รmbito de variables en funciones----------------------------------- Ejemplo
###Code
# valor de variable global 'x' se mantiene
x = 7
def foo():
x = 42
print(x)
# llamo a la funcion
foo()
print(x)
# Global indica que se va a trabajar con la variable global, por lo que cuando redefinimos la variable,
# se cambia el valor global de esta
def foo():
global x
x = 42 # reasignacion total x
print('valor de x final', x)
return x
# llamo a la funcion
x = 7
foo()
print(x)
x= 3
a = x
a = 2
print(x)
print(a)
lista_a = [1,2,3,4]
lista_b = lista_a.copy()
lista_b[0] = "Hola"
print(lista_b)
print(lista_a)
###Output
[1, 2, 3, 4]
###Markdown
Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo.
###Code
def jugar(intento: int = 1):
respuesta = input("ยฟDe quรฉ color es una naranja? ")
if respuesta.lower() != "naranja":
if intento < 3:
print("\nFallaste! Intรฉntalo de nuevo")
intento += 1
jugar(intento) # Llamada recursiva
else:
print("\nPerdiste!")
else:
print("\nGanaste!")
def sumaRecursiva(n):
if n==1:
return 1
else:
return n+sumaRecursiva(n-1)
jugar()
jugar()
###Output
_____no_output_____
###Markdown
Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar.
###Code
def par_impar(numero):
if numero % 2 == 0:
print(f"El nรบmero {numero} es par")
else:
print(f"el numero {numero} es impar")
numero = int(input("Ingrese un numero: "))
numero
par_impar(numero)
###Output
el numero 7 es impar
###Markdown
2.Realiza una funciรณn llamada area_rectangulo(base, altura) que devuelva el รกrea del rectangulo a partir de una base y una altura. Calcula el รกrea de un rectรกngulo de 15 de base y 10 de altura:
###Code
def area_rectangulo(base, altura):
return base * altura / 2  # note: dividing by 2 gives the area of a TRIANGLE; a rectangle's area is simply base * altura
b= 15
h = 10
area = area_rectangulo(b, h)
print("El รกrea del triangulo de base = {} y altura = {} es: {}".format(b,h,area))
###Output
El รกrea del triangulo de base = 15 y altura = 10 es: 75.0
###Markdown
3.Realiza una funciรณn llamada relacion(a, b) que a partir de dos nรบmeros cumpla lo siguiente:- Si el primer nรบmero es mayor que el segundo, debe devolver 1.- Si el primer nรบmero es menor que el segundo, debe devolver -1.- Si ambos nรบmeros son iguales, debe devolver un 0.Comprueba la relaciรณn entre los nรบmeros: '5 y 10', '10 y 5' y '5 y 5'. 4.El factorial de un nรบmero corresponde al producto de todos los nรบmeros desde 1 hasta el propio nรบmero. Es el ejemplo con retorno mรกs utilizado para mostrar la utilidad de este tipo de funciones:- 3! = 1 x 2 x 3 = 6- 5! = 1 x 2 x 3 x 4 x 5 = 120
###Code
print()
###Output
_____no_output_____
###Markdown
Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal.
###Code
def mi_funcion():
# aquรญ mi codigo
pass # uso del pass es opcional
###Output
_____no_output_____
###Markdown
Una funciรณn, no es ejecutada hasta tanto no sea invocada. Para invocar una funciรณn, simplemente se la llama por su nombre:
###Code
# defino mi funciรณn
def hola():
print("Hola Mundo")
# llamo mi funciรณn
hola()
###Output
Hola Mundo
###Markdown
Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable:
###Code
# Funciรณn retorna la palabra "Hola Mundo"
def funcion():
return "Hola Mundo"
# Almaceno el valor devuelto en una variable
frase = funcion()
print(frase)
###Output
Hola Mundo
###Markdown
Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno.
###Code
def mi_funcion(nombre, apellido):
# algoritmo
pass
###Output
_____no_output_____
###Markdown
Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder:
###Code
# Ejemplo funciรณn con parรกmetros
def mi_funcion(nombre, apellido):
nombre_completo = nombre + ' ' + apellido
print(nombre_completo)
mi_funcion('gonzalo','delgado')
mi_funcion()  # raises TypeError: mi_funcion() missing 2 required positional arguments: 'nombre' and 'apellido'
# Ejemplo 2
def suma(a, b): # valores que se reciben
return a + b
a = 5
b = 6
resultado = suma(a, b) # valores que se envรญan
print(resultado)
###Output
11
###Markdown
Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn
###Code
# Ejemplo3
def resta(a, b):
return a - b
# argumento 30 => posiciรณn 0 => parรกmetro a
# argumento 10 => posiciรณn 1 => parรกmetro b
resta(30, 10)
###Output
_____no_output_____
###Markdown
Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente:
###Code
resta(b=100, a=10)
###Output
_____no_output_____
###Markdown
Valores por Defecto Es posible colocar valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando
###Code
def bar(x=2):
return x + 90
# my_var = 3
print(bar())
# pasando un valor a mi funcion
print(bar(6))
###Output
96
###Markdown
Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores
###Code
def indeterminados_posicion(*args):
for arg in args:
print(arg)
indeterminados_posicion(5,"Hola",[1,2,3,4,5],{'dia':'sabado'})
###Output
5
Hola
[1, 2, 3, 4, 5]
{'dia': 'sabado'}
###Markdown
Cuando se tiene los valores en lista
###Code
# valores a ser sumados se encuentran en una lista
def sumar(a,b):
return a+b
# lista con valores a ser sumados
numeros_sumar=[23,11]
print(sumar(*numeros_sumar))
###Output
34
###Markdown
kwargs Cuando no se sabe la cantidad de valores
###Code
def indeterminados_nombre(**kwargs):
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5])
###Output
n => 5
c => Hola
l => [1, 2, 3, 4, 5]
###Markdown
Valores contenidos en diccionario
###Code
def calcular(importe, descuento):
return importe - (importe * descuento / 100)
datos = {
"descuento": 10,
"importe": 1500
}
print(calcular(**datos))
###Output
1350.0
###Markdown
Combinando ambos conceptos
###Code
def super_funcion(*args,**kwargs):
total = 0
for arg in args:
total += arg
print("sumatorio => ", total)
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
super_funcion(10, 50, -1, 1.56, 10, 20, 300,*[20,30], nombre="Hector", edad=27,**{'sueldo':1200})
datos_persona ={
'nombre':'Gonzalo',
'edad': 26
}
"Hola {nombre}, tu edad es {edad}".format(**datos_persona)
datos_lista =['Gonzalo', 26]
"Hola {0}, tu edad es {1}".format(*datos_lista)
###Output
_____no_output_____
###Markdown
Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia (Mutables): Listas, diccionarios, conjuntos... Paso por valor
###Code
# Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato
def bar(x):
x = x + 90
print('valor de x dentro de la funcion "bar" es ', x)
x = 3
bar(x)
print('valor de x a nivel global es ', x)
###Output
valor de x dentro de la funcion "bar" es 93
valor de x a nivel global es 3
###Markdown
###Code
# Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos
def bar(x):
return x + 90
my_var = 3
my_var = bar(my_var)
print(my_var)
my_var
###Output
_____no_output_____
###Markdown
Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera:
###Code
# Valor puede
def foo(x):
x[0] = x[0] * 99
# lista original
my_list = [1, 2, 3]
foo(my_list)
my_list
###Output
_____no_output_____
###Markdown
###Code
# asi se genere una copia simple, esto no soluciona el problema
my_list2 = my_list
foo(my_list2)
my_list2
my_list
# se puede solucionar realizando una copia al objeto
my_list = [1, 2, 3]
foo(my_list.copy())
my_list
###Output
_____no_output_____
###Markdown
รmbito de variables en funciones----------------------------------- Ejemplo
###Code
# valor de variable global 'x' se mantiene
x = 7
def foo():
x = 42
print(x)
# llamo a la funcion
foo()
print(x)
# Global indica que se va a trabajar con la variable global, por lo que cuando redefinimos la variable,
# se cambia el valor global de esta
def foo():
global x
x = 42 # reasignacion total x
print('valor de x final', x)
return x
# llamo a la funcion
x = 7
foo()
print(x)
###Output
valor de x final 42
42
###Markdown
Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo.
###Code
def jugar(intento=1):
respuesta = input("ยฟDe quรฉ color es una naranja? ")
if respuesta.lower() != "naranja":
if intento < 3:
print("\nFallaste! Intรฉntalo de nuevo")
intento += 1
jugar(intento) # Llamada recursiva
else:
print("\nPerdiste!")
else:
print("\nGanaste!")
jugar()
###Output
ยฟDe quรฉ color es una naranja? blue
###Markdown
Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar.
###Code
def validar_par(num):
if num % 2 == 0:
return True
return False
numero = int(input("Ingrese un nรบmero: "))
numero
if validar_par(numero):
print('El nรบmero ingresado {} es par'.format(numero))
else:
print('El nรบmero ingresado {} es impar'.format(numero))
###Output
El nรบmero ingresado 25 es impar
###Markdown
2.Realiza una funciรณn llamada area_rectangulo(base, altura) que devuelva el รกrea del rectangulo a partir de una base y una altura. Calcula el รกrea de un rectรกngulo de 15 de base y 10 de altura:
###Code
# funciones
def area_rectangulo(b, h):
return b * h / 2  # note: this is the area of a TRIANGLE; a rectangle's area would be b * h
# mi programa princial
base = int(input('Ingrese la base del triangulo: '))
altura = int(input('ingrese la altura del triangulo: '))
area1 = area_rectangulo(base, altura)
area2 = area_rectangulo(2, 2)
print('area 1: ', area1)
print('area 2: ', area2)
print('area 3', area_rectangulo(7, 4))
###Output
area 1: 6.0
area 2: 2.0
area 3 14.0
###Markdown
3.Crear una funcion para sumar elementos de una lista
###Code
list_n = [20, 10, 30, 40, 50 ,60]
###Output
_____no_output_____
###Markdown
3.Realiza una funciรณn llamada relacion(a, b) que a partir de dos nรบmeros cumpla lo siguiente:- Si el primer nรบmero es mayor que el segundo, debe devolver 1.- Si el primer nรบmero es menor que el segundo, debe devolver -1.- Si ambos nรบmeros son iguales, debe devolver un 0.Comprueba la relaciรณn entre los nรบmeros: '5 y 10', '10 y 5' y '5 y 5'. 4.El factorial de un nรบmero corresponde al producto de todos los nรบmeros desde 1 hasta el propio nรบmero. Es el ejemplo con retorno mรกs utilizado para mostrar la utilidad de este tipo de funciones:- 3! = 1 x 2 x 3 = 6- 5! = 1 x 2 x 3 x 4 x 5 = 120
###Code
def factorial(num):
fac = 1
for i in range(1,num+1):
fac = fac * i
return fac
n = 6
factorial(n)
def factorial(n):
if n == 0:
return 1
else:
return n * factorial(n-1)
n=int(input("Ingrese un numero : "))
print(factorial(n))
###Output
Ingrese un numero : 5
###Markdown
5.Realizar una funciรณn que realice la sucesiรณn de fibonacci:- 0, 1, 1, 2, 3, 5, 8, 13, 21, 34
###Code
def serie_fibo(n):
fibonacci = [0,1]
for i in range(2,n+1):
fibonacci.append(fibonacci[-2] + fibonacci[-1])
return fibonacci
n = 9
serie_fibo(n)
###Output
_____no_output_____
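###Markdown
An added contrast (not part of the original solution): the same sequence written recursively. It reads naturally but is exponentially slower than the list-based version above for large n, because each call recomputes the same subproblems.
###Code
def fibo(n):
    if n < 2:                          # base cases: fibo(0) = 0, fibo(1) = 1
        return n
    return fibo(n - 1) + fibo(n - 2)   # recursive case

print([fibo(i) for i in range(10)])    # [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
###Output
_____no_output_____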
###Markdown
Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal.
###Code
def mi_funcion():
# aquรญ mi codigo
pass # uso del pass es opcional
###Output
_____no_output_____
###Markdown
Una funciรณn, no es ejecutada hasta tanto no sea invocada. Para invocar una funciรณn, simplemente se la llama por su nombre:
###Code
# defino mi funciรณn
def hola():
print("Hola Mundo")
# llamo mi funciรณn
hola()
###Output
Hola Mundo
###Markdown
Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable:
###Code
# Funciรณn retorna la palabra "Hola Mundo"
def funcion():
return "Hola Mundo"
# Almaceno el valor devuelto en una variable
frase = funcion()
print(frase)
###Output
Hola Mundo
###Markdown
Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno.
###Code
def mi_funcion(nombre, apellido):
# algoritmo
pass
###Output
_____no_output_____
###Markdown
Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder:
###Code
# Ejemplo funciรณn con parรกmetros
def mi_funcion(nombre, apellido):
nombre_completo = nombre, apellido
print(nombre_completo)
mi_funcion('gonzalo','delgado')
mi_funcion()  # raises TypeError: mi_funcion() missing 2 required positional arguments: 'nombre' and 'apellido'
# Ejemplo 2
def suma(a, b): # valores que se reciben
return a + b
a = 5
b = 6
resultado = suma(a, b) # valores que se envรญan
print(resultado)
###Output
11
###Markdown
Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn
###Code
# Ejemplo3
def resta(a, b):
return a - b
# argumento 30 => posiciรณn 0 => parรกmetro a
# argumento 10 => posiciรณn 1 => parรกmetro b
resta(30, 10)
###Output
_____no_output_____
###Markdown
Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente:
###Code
resta(b=30, a=10)
###Output
_____no_output_____
###Markdown
Valores por Defecto Es posible colocar valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando
###Code
def bar(x=2):
x = x + 90
return x
# my_var = 3
print(bar())
# pasando un valor a mi funcion
print(bar(6))
###Output
96
###Markdown
Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores
###Code
def indeterminados_posicion(*args):
for arg in args:
print(arg)
indeterminados_posicion(5,"Hola",[1,2,3,4,5])
###Output
5
Hola
[1, 2, 3, 4, 5]
###Markdown
Cuando se tiene los valores en lista
###Code
# valores a ser sumados se encuentran en una lista
def sumar(a,b):
return a+b
# lista con valores a ser sumados
numeros_sumar=[23,11]
print(sumar(*numeros_sumar))
###Output
34
###Markdown
kwargs Cuando no se sabe la cantidad de valores
###Code
def indeterminados_nombre(**kwargs):
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5])
###Output
n => 5
c => Hola
l => [1, 2, 3, 4, 5]
###Markdown
Valores contenidos en diccionario
###Code
def calcular(importe, descuento):
return importe - (importe * descuento / 100)
datos = {
"descuento": 10,
"importe": 1500
}
print(calcular(**datos))
###Output
1350.0
###Markdown
Combinando ambos conceptos
###Code
def super_funcion(*args,**kwargs):
total = 0
for arg in args:
total += arg
print("sumatorio => ", total)
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
super_funcion(10, 50, -1, 1.56, 10, 20, 300, nombre="Hector", edad=27)
datos_persona ={
'nombre':'Gonzalo',
'edad': 26
}
"Hola {nombre}, tu edad es {edad}".format(**datos_persona)
###Output
_____no_output_____
###Markdown
Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia (Mutables): Listas, diccionarios, conjuntos... Paso por valor
###Code
# Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato
def bar(x):
x = x + 90
x = 3
bar(x)
print(x)
###Output
3
###Markdown
###Code
# Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos
def bar(x):
return x + 90
my_var = 3
my_var = bar(my_var)
print(my_var)
###Output
93
###Markdown
Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera:
###Code
# Valor puede
def foo(x):
x[0] = x[0] * 99
# lista original
my_list = [1, 2, 3]
foo(my_list)
my_list
###Output
_____no_output_____
###Markdown
###Code
# asi se genere una copia simple, esto no soluciona el problema
my_list2 = my_list
foo(my_list2)
my_list2
my_list
# se puede solucionar realizando una copia al objeto
foo(my_list.copy())
my_list
###Output
_____no_output_____
###Markdown
รmbito de variables en funciones----------------------------------- Ejemplo
###Code
# valor de variable global 'x' se mantiene
x = 7
def foo():
x = 42
print(x)
# llamo a la funcion
foo()
print(x)
# Global indica que se va a trabajar con la variable global, por lo que cuando redefinimos la variable,
# se cambia el valor global de esta
x = 7
def foo():
global x
x = 42
print(x)
# llamo a la funcion
foo()
print(x)
###Output
42
42
###Markdown
Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo.
###Code
def jugar(intento=1):
respuesta = input("ยฟDe quรฉ color es una naranja? ")
if respuesta.lower() != "naranja":
if intento < 3:
print("\nFallaste! Intรฉntalo de nuevo")
intento += 1
jugar(intento) # Llamada recursiva
else:
print("\nPerdiste!")
else:
print("\nGanaste!")
jugar()
###Output
ยฟDe quรฉ color es una naranja? asdasd
###Markdown
Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar.
###Code
def main():
print("PARES E IMPARES")
numero_1 = int(input("Escriba un nรบmero entero: "))
numero_2 = int(input(f"Escriba un nรบmero entero mayor o igual que {numero_1}: "))
if numero_2 < numero_1:
print(f"ยกLe he pedido un nรบmero entero mayor o igual que {numero_1}!")
else:
for i in range(numero_1, numero_2 + 1):
if i % 2 == 0:
print(f"El nรบmero {i} es par.")
else:
print(f"El nรบmero {i} es impar.")
if __name__ == "__main__":
main()
###Output
_____no_output_____
###Markdown
2.Realiza una funciรณn llamada area_rectangulo(base, altura) que devuelva el รกrea del rectangulo a partir de una base y una altura. Calcula el รกrea de un rectรกngulo de 15 de base y 10 de altura:
###Code
def area_rectangulo(base, altura):
return base*altura
print( area_rectangulo(15,10) )
###Output
150
###Markdown
3.Realiza una funciรณn llamada relacion(a, b) que a partir de dos nรบmeros cumpla lo siguiente:- Si el primer nรบmero es mayor que el segundo, debe devolver 1.- Si el primer nรบmero es menor que el segundo, debe devolver -1.- Si ambos nรบmeros son iguales, debe devolver un 0.Comprueba la relaciรณn entre los nรบmeros: '5 y 10', '10 y 5' y '5 y 5'.
###Code
def relacion(a, b):
if a > b:
return 1
elif a < b:
return -1
else:
return 0
print( relacion(5, 10) )
print( relacion(10, 5) )
print( relacion(5, 5) )
###Output
_____no_output_____
###Markdown
4.El factorial de un nรบmero corresponde al producto de todos los nรบmeros desde 1 hasta el propio nรบmero. Es el ejemplo con retorno mรกs utilizado para mostrar la utilidad de este tipo de funciones:- 3! = 1 x 2 x 3 = 6- 5! = 1 x 2 x 3 x 4 x 5 = 120
###Code
def factorial(num):
print("Valor inicial ->",num)
if num > 1:
num = num * factorial(num -1)
print("valor final ->",num)
return num
print(factorial(5))
###Output
_____no_output_____
###Markdown
Funciones Python Las funciones son fragmentos de cรณdigo que se pueden ejecutar mรบltiples veces, ademรกs pueden recibir y devolver informaciรณn para comunicarse con el proceso principal.
###Code
def mi_funcion():
# aquรญ mi codigo
pass # uso del pass es opcional
raise NotImplementedError  # 'NoImplementError' does not exist; the built-in name is NotImplementedError
###Output
_____no_output_____
###Markdown
Una funciรณn, no es ejecutada hasta tanto no sea invocada. Para invocar una funciรณn, simplemente se la llama por su nombre:
###Code
# defino mi funciรณn
def hola():
print("Hola Mundo")
# llamo mi funciรณn
hola()
###Output
Hola Mundo
###Markdown
Cuando una funciรณn, haga un retorno de datos, รฉstos, pueden ser asignados a una variable:
###Code
# Funciรณn retorna la palabra "Hola Mundo"
def funcion():
return "Hola Mundo"
# Almaceno el valor devuelto en una variable
frase = funcion()
print(frase)
###Output
Hola Mundo
###Markdown
Funciรณn con Parรกmetros----------------------------------- Un parรกmetro es un valor que la funciรณn espera recibir cuando sea llamada (invocada), a fin de ejecutar acciones en base al mismo. Una funciรณn puede esperar uno o mรกs parรกmetros (que irรกn separados por una coma) o ninguno.
###Code
def mi_funcion(nombre, apellido):
# algoritmo
pass
###Output
_____no_output_____
###Markdown
Los parรกmetros que una funciรณn espera, serรกn utilizados por รฉsta, dentro de su algoritmo, a modo de variables de รกmbito local. Es decir, que los parรกmetros serรกn variables locales, a las cuรกles solo la funciรณn podrรก acceder:
###Code
# Ejemplo funciรณn con parรกmetros
def mi_funcion(nombre):
print(f"Hola {nombre}")
mi_funcion('gonzalo')
mi_funcion()  # raises TypeError: mi_funcion() missing 1 required positional argument: 'nombre'
# Ejemplo 2
def suma(numero1, numero2): # valores que se reciben
return numero1 + numero2
a = 5
b = 6
resultado = suma(a, b) # valores que se envรญan
print(resultado)
###Output
11
###Markdown
Cuando pasamos parรกmetros a nuestra funciรณn, esta entiende cada valor por la posiciรณn en que se ha descrito en la funciรณn
###Code
# Ejemplo3
def resta(a, b):
return a - b
# argumento 30 => posiciรณn 0 => parรกmetro a
# argumento 10 => posiciรณn 1 => parรกmetro b
resta(30, 10)
###Output
_____no_output_____
###Markdown
Una forma de cambiar el orden en como entiende la funciรณn en que orden queremos pasar los parรกmetros es la siguiente:
###Code
print(resta(100,10))
resta(b=100, a=10)
###Output
90
###Markdown
Valores por Defecto Es posible colocar valores por defecto en nuestras funciones, asi si no se pasa un parรกmetro, nuestra funciรณn seguira funcionando
###Code
def bar(x=2):
return x + 90
# my_var = 3
print(bar())
# pasando un valor a mi funcion
print(bar(6))
###Output
96
###Markdown
Desempaquetado de datos Muchas veces se utilizan listas , tuplas o diccionarios para contener diversa cantidad de datos. En ese sentido, es posible desempaquetar los valores contenidos en este tipo de datos para que puedan ser leidos por la funcion Args Cuando no se sabe la cantidad de valores
###Code
def indeterminados_posicion(*args):
for arg in args:
print(arg)
indeterminados_posicion(5,"Hola",[1,2,3,4,5],{'dia':'sabado'})
###Output
_____no_output_____
###Markdown
Cuando se tiene los valores en lista
###Code
# valores a ser sumados se encuentran en una lista
def sumar(a,b):
return a+b
# lista con valores a ser sumados
numeros_sumar=[23,11]
print(sumar(*numeros_sumar))
sumar(numeros_sumar[0],numeros_sumar[1])
###Output
_____no_output_____
###Markdown
kwargs Cuando no se sabe la cantidad de valores
###Code
def indeterminados_nombre(**kwargs):
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
indeterminados_nombre(n=5, c="Hola", l=[1,2,3,4,5])
###Output
_____no_output_____
###Markdown
Valores contenidos en diccionario
###Code
def calcular(importe, descuento):
return importe * (1 - descuento / 100)
datos = {
"descuento": 10,
"importe": 1500
}
print(calcular(**datos))
calcular(datos['importe'], datos['descuento'])
###Output
_____no_output_____
###Markdown
Combinando ambos conceptos
###Code
def super_funcion(*args,**kwargs):
total = 0
for arg in args:
total += arg
print("sumatorio => ", total)
for kwarg in kwargs:
print(kwarg, "=>", kwargs[kwarg])
super_funcion(10, 50, -1, 1.56, 10, 20, 300,*[20,30], nombre="Hector", edad=27,**{'sueldo':1200})
datos_persona ={
'nombre':'Gonzalo',
'edad': 26
}
"Hola {nombre}, tu edad es {edad}".format(**datos_persona)
datos_lista =['Gonzalo', 26]
"Hola {0}, tu edad es {1}".format(*datos_lista)
###Output
_____no_output_____
###Markdown
Paso por valor y referencia----------------------------------- Dependiendo del tipo de dato que enviemos a la funciรณn, podemos diferenciar dos comportamientos:- Paso por valor: Se crea una copia local de la variable dentro de la funciรณn.- Paso por referencia: Se maneja directamente la variable, los cambios realizados dentro de la funciรณn le afectarรกn tambiรฉn fuera.Tradicionalmente:- Los tipos simples se pasan por valor (Inmutables): Enteros, flotantes, cadenas, lรณgicos...- Los tipos compuestos se pasan por referencia (Mutables): Listas, diccionarios, conjuntos... Paso por valor
###Code
# Valor pasado por valor. Este genera una copia al valor pasado para no alterar el valor original del dato
def bar(x):
x = x + 90
#print('valor de x dentro de la funcion "bar" es ', x)
return x
x = 3
bar(x)
print('valor de x a nivel global es ', x)
x = bar(x)
print(x)
###Output
_____no_output_____
###Markdown
###Code
# Para cambiar el valor de estos valores, podrรญamos reasignar el valor de variable en algunos casos
def bar(x):
return x + 90
my_var = 3
my_var = bar(my_var)
print(my_var)
my_var
###Output
_____no_output_____
###Markdown
Paso por referencia Las listas u otras colecciones, al ser tipos compuestos se pasan por referencia, y si las modificamos dentro de la funciรณn estaremos modificรกndolas tambiรฉn fuera:
###Code
# Valor puede
def foo(x : list):
x[0] = x[0] * 99
# lista original
my_list = [1, 2, 3]
foo(my_list)
my_list
###Output
_____no_output_____
###Markdown
###Code
# asi se genere una copia simple, esto no soluciona el problema
my_list2 = my_list
foo(my_list2)
my_list2
my_list
# se puede solucionar realizando una copia al objeto
my_list = [1, 2, 3]
my_list2 = my_list.copy()
foo(my_list2)
my_list
my_list2
###Output
_____no_output_____
###Markdown
รmbito de variables en funciones----------------------------------- Ejemplo
###Code
# valor de variable global 'x' se mantiene
x = 7
def foo():
x = 42
print(x)
# llamo a la funcion
foo()
print(x)
# Global indica que se va a trabajar con la variable global, por lo que cuando redefinimos la variable,
# se cambia el valor global de esta
def foo():
global x
x = 42 # reasignacion total x
print('valor de x final', x)
return x
# llamo a la funcion
x = 7
foo()
print(x)
###Output
_____no_output_____
###Markdown
Funciรณn Recursivas----------------------------------- Se trata de funciones que se llaman a sรญ mismas durante su propia ejecuciรณn. Funcionan de forma similar a las iteraciones, pero debemos encargarnos de planificar el momento en que dejan de llamarse a sรญ mismas o tendremos una funciรณn rescursiva infinita.Suele utilizarse para dividir una tarea en subtareas mรกs simples de forma que sea mรกs fรกcil abordar el problema y solucionarlo.
###Code
def jugar(intento: int = 1):
respuesta = input("ยฟDe quรฉ color es una naranja? ")
if respuesta.lower() != "naranja":
if intento < 3:
print("\nFallaste! Intรฉntalo de nuevo")
intento += 1
jugar(intento) # Llamada recursiva
else:
print("\nPerdiste!")
else:
print("\nGanaste!")
jugar()
###Output
_____no_output_____
###Markdown
Ejercicios 1.Realiza una funciรณn que indique si un nรบmero pasado por parรกmetro es par o impar. 2.Realiza una funciรณn llamada area_rectangulo(base, altura) que devuelva el รกrea del rectangulo a partir de una base y una altura. Calcula el รกrea de un rectรกngulo de 15 de base y 10 de altura: 3.Realiza una funciรณn llamada relacion(a, b) que a partir de dos nรบmeros cumpla lo siguiente:- Si el primer nรบmero es mayor que el segundo, debe devolver 1.- Si el primer nรบmero es menor que el segundo, debe devolver -1.- Si ambos nรบmeros son iguales, debe devolver un 0.Comprueba la relaciรณn entre los nรบmeros: '5 y 10', '10 y 5' y '5 y 5'. 4.El factorial de un nรบmero corresponde al producto de todos los nรบmeros desde 1 hasta el propio nรบmero. Es el ejemplo con retorno mรกs utilizado para mostrar la utilidad de este tipo de funciones:- 3! = 1 x 2 x 3 = 6- 5! = 1 x 2 x 3 x 4 x 5 = 120 5.Escribir una funciรณn que, dado un nรบmero de DNI, retorne True si el nรบmero es vรกlido y False si no lo es. Para que un nรบmero de DNI sea vรกlido debe tener entre 7 y 8 dรญgitos.
###Code
num_dni = "70612302562"
def is_valido_dni(dni):
if dni.isdigit() and 7 <= len(dni) <= 8:  # must be all digits and between 7 and 8 characters long
return True
else:
return False
num_dni = "70612302"
if is_valido_dni(num_dni):
print("EL DNI ES VALIDO")
else:
print("EL DNI ES INVALIDO")
###Output
EL DNI ES VALIDO
|
lecture-06/lab.ipynb | ###Markdown
[](https://colab.research.google.com/github/real-itu/modern-ai-course/blob/master/lecture-06/lab.ipynb) Contents and why we need this labThis lab is about implementing neural networks yourself from scratch. All the modern frameworks for deep learning use automatic differentiation (autodiff) so you don't have to code the backward step yourself. In this version of this lab you will develop your own autodif implementation, and use this to build a simple neural network. Once you've done this lab you should have a very good understanding of what goes on below the hood in the modern framework such as [PyTorch](https://pytorch.org/), [TensorFlow](https://www.tensorflow.org/) or [JAX](https://github.com/google/jax). In particular the code we'll develop will look quite similar to the pytorch API. External sources of information1. Jupyter notebook. You can find more information about Jupyter notebooks [here](https://jupyter.org/). It will come as part of the [Anaconda](https://www.anaconda.com/) Python installation. You can also use [colab](colab.to), which is a free online jupyter notebook.3. [Nanograd](https://github.com/rasmusbergpalm/nanograd) is a minimalistic version of autodiff developed by Rasmus Berg Palm that we use for our framework. Nanograd automatic differention framework The [Nanograd](https://github.com/rasmusbergpalm/nanograd) framework defines a class Var which both holds a value and gradient value that we can use to store the intermediate values when we apply the chain rule of differentiation.
###Code
# Copy and pasted from https://github.com/rasmusbergpalm/nanograd/blob/main/nanograd.py
from typing import Union
from math import tanh
class Var:
"""
A variable which holds a number and enables gradient computations.
"""
def __init__(self, val: Union[float, int], parents=None):
assert type(val) in {float, int}
if parents is None:
parents = []
self.v = val
self.parents = parents
self.grad = 0.0
def backprop(self, bp):
self.grad += bp
for parent, grad in self.parents:
parent.backprop(grad * bp)
def backward(self):
self.backprop(1.0)
def __add__(self: 'Var', other: 'Var') -> 'Var':
return Var(self.v + other.v, [(self, 1.0), (other, 1.0)])
def __mul__(self: 'Var', other: 'Var') -> 'Var':
return Var(self.v * other.v, [(self, other.v), (other, self.v)])
def __pow__(self, power: Union[float, int]) -> 'Var':
assert type(power) in {float, int}, "power must be float or int"
return Var(self.v ** power, [(self, power * self.v ** (power - 1))])
def __neg__(self: 'Var') -> 'Var':
return Var(-1.0) * self
def __sub__(self: 'Var', other: 'Var') -> 'Var':
return self + (-other)
def __truediv__(self: 'Var', other: 'Var') -> 'Var':
return self * other ** -1
def tanh(self) -> 'Var':
return Var(tanh(self.v), [(self, 1 - tanh(self.v) ** 2)])
def relu(self) -> 'Var':
return Var(self.v if self.v > 0.0 else 0.0, [(self, 1.0 if self.v > 0.0 else 0.0)])
def __repr__(self):
return "Var(v=%.4f, grad=%.4f)" % (self.v, self.grad)
###Output
_____no_output_____
###Markdown
A few examples illustrate how we can use this:
###Code
a = Var(3.0)
b = Var(5.0)
f = a * b
f.backward()
for v in [a, b, f]:
print(v)
a = Var(3.0)
b = Var(5.0)
c = a * b
d = Var(9.0)
e = a * d
f = c + e
f.backward()
for v in [a, b, c, d, e, f]:
print(v)
###Output
Var(v=3.0000, grad=14.0000)
Var(v=5.0000, grad=3.0000)
Var(v=15.0000, grad=1.0000)
Var(v=9.0000, grad=3.0000)
Var(v=27.0000, grad=1.0000)
Var(v=42.0000, grad=1.0000)
###Markdown
Exercise a) What is being calculated?
Explain briefly the output of the code? What is the expression we differentiate and with respect to what variables?ยจ
For the first example the expression is f = a * b, and backward() computes its derivative with respect to every variable in the graph: a seed gradient of 1.0 is propagated from f, giving a.grad = df/da = b = 5 and b.grad = df/db = a = 3 (and f.grad = 1 for f with respect to itself).
The same holds for the second example, f = a*b + a*d; the graph is just deeper, and a.grad = b + d = 14 because the contributions of the two paths through a are summed, exactly as the chain rule prescribes.
###Code
from IPython.display import Image
###Output
_____no_output_____
###Markdown
Exercise b) How does the backward function work?
For the first example above, execute the backward function by hand to convince yourself that it indeed calculates the gradients with respect to the variables. Write down the sequence of calls to backprop for the first example above.

Exercise c) What happens if we run backward again?
Try to execute the code below. Explain what happens.
Running backward() a second time repeats the same gradient computation, but because backprop accumulates with +=, the new gradients are added to the ones already stored, so every grad doubles. To get correct gradients from a fresh backward pass the grads must first be reset to zero, which is exactly what the optimizer's zero_grad does later in the lab.
###Code
f.backward()
for v in [a, b, c, d, e, f]:
print(v)
###Output
Var(v=3.0000, grad=28.0000)
Var(v=5.0000, grad=6.0000)
Var(v=15.0000, grad=2.0000)
Var(v=9.0000, grad=6.0000)
Var(v=27.0000, grad=2.0000)
Var(v=42.0000, grad=2.0000)
###Markdown
Exercise d) Test correctness of derivatives with the finite difference methodWrite a small function that uses [the finite difference method](https://en.wikipedia.org/wiki/Finite_difference_method) to numerically compute the gradient:$$\frac{\partial f(x)}{\partial x} \approx \frac{f(x+dx)-f(x)}{dx}$$for a very small $dx$.
###Code
def finite_difference(fn, x_val, dx=1e-10):
"""
Computes the finite difference numerical approximation to the derivative of fn(x) with respect to x at x_val: (fn(x_val + dx) - fn(x_val))/dx
"""
return (fn(x_val + dx) - fn(x_val))/dx
###Output
_____no_output_____
###Markdown
Use your finite difference function to compute the gradient of $f$ with respect to $a$ and $b$ in the following function: $f(x) = a \cdot b + b$, at a=3 and b=5.
###Code
# test function - try to change into other functions as well
def f(a, b):
return a*b + b
# partial derivatives of f(a, b) = a*b + b at a=3, b=5, holding the other variable fixed
def f_b(b): # a fixed at 3
return 3*b + b
def f_a(a): # b fixed at 5
return a*5+5
print(finite_difference(f_a, 3)) # df/da = b = 5
print(finite_difference(f_b, 5)) # df/db = a + 1 = 4
###Output
_____no_output_____
###Markdown
Write the same function using Nanograd `Var`s and verify that Nanograd computes the same gradients
###Code
a = Var(3)
b = Var(5)
c = a * b + b
c.backward()
print(a)
print(b)
print(c)
###Output
Var(v=3.0000, grad=5.0000)
Var(v=5.0000, grad=4.0000)
Var(v=20.0000, grad=1.0000)
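###Markdown
An added cross-check (not required by the exercise text): reuse the one-variable helpers f_a and f_b defined above and compare the finite-difference estimates with the gradients Nanograd stored after backward().
###Code
a = Var(3.0)
b = Var(5.0)
out = a * b + b
out.backward()

print("df/da  autodiff:", a.grad, " finite difference:", finite_difference(f_a, 3))   # both ~5
print("df/db  autodiff:", b.grad, " finite difference:", finite_difference(f_b, 5))   # both ~4
###Output
_____no_output_____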
###Markdown
Create an artificial dataset to play withWe create a non-linear 1d regression task. The generator supports various noise levels. You can modify it yourself if you want more or less challenging tasks.
###Code
from math import sin
import random
import tqdm as tqdm
import matplotlib.pyplot as plt
def sample_data(noise=0.3):
x = (random.random() - 0.5) * 10
return x, sin(x) + x + random.gauss(0, noise)
train_data = [sample_data() for _ in range(100)]
val_data = [sample_data() for _ in range(100)]
for x, y in train_data:
plt.plot(x, y, 'b.')
plt.show()
###Output
_____no_output_____
###Markdown
Building the neural network.We'll create a feedforward neural network consisting of a series of dense layers. See the image below. Each dense layer is just a number of artificial neurons. In the image below each column of circles (neurons) is a dense layer. It's dense because the weight matrix is dense; there's a connection between every input and every output neuron in the layer.The inputs to create a dense layer is following:1. **The input size and output size**. We have to define the number of inputs and outputs. The inputs are the number of inputs to the layer, and the output size is the number of artificial neurons the layer has.2. **Activation functions**. Each dense layer must have an activation function (it can also be the linear activation which is equivalent to identity function). The power of neural networks comes from non-linear activation functions.3. **Parameter initialization**. We will initialize the weights to have random values. This is done in practice by drawing pseudo random numbers from a Gaussian or uniform distribution. It turns out that for deeper models we have to be careful about how we scale the random numbers. This will be the topic of a later exercice. For now we will just use simple Gaussians. See the `Initializer` class below.Note that we use Sequence in the code below. A Sequence is an ordered list. This means the order we insert and access items are the same. 
###Code
from typing import Sequence
class Initializer:
def init_weights(self, n_in, n_out) -> Sequence[Sequence[Var]]:
raise NotImplementedError
def init_bias(self, n_out) -> Sequence[Var]:
raise NotImplementedError
class NormalInitializer(Initializer):
def __init__(self, mean=0, std=0.1):
self.mean = mean
self.std = std
def init_weights(self, n_in, n_out):
return [[Var(random.gauss(self.mean, self.std)) for _ in range(n_out)] for _ in range(n_in)]
def init_bias(self, n_out):
return [Var(0.0) for _ in range(n_out)]
###Output
_____no_output_____
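###Markdown
An added, optional sketch (scaled initialization is hinted at as a later exercise, so treat this as a preview rather than the required solution): an initializer that scales the standard deviation by 1/sqrt(n_in), one common way to keep activations at a reasonable scale in deeper networks.
###Code
import random

class ScaledNormalInitializer(Initializer):
    def init_weights(self, n_in, n_out):
        std = 1.0 / (n_in ** 0.5)   # shrink the weights as the fan-in grows
        return [[Var(random.gauss(0.0, std)) for _ in range(n_out)] for _ in range(n_in)]

    def init_bias(self, n_out):
        return [Var(0.0) for _ in range(n_out)]
###Output
_____no_output_____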
###Markdown
Exercise e) Dense layerComplete the DenseLayer class below. The dense layer takes an input vector and computes an output vector corresponding to the value of each artificial neuron in the dense layer.
###Code
class DenseLayer:
def __init__(self, n_in: int, n_out: int, act_fn, initializer: Initializer = NormalInitializer()):
"""
n_in: the number of inputs to the layer
n_out: the number of output neurons in the layer
act_fn: the non-linear activation function for each neuron
initializer: The initializer to use to initialize the weights and biases
"""
self.weights = initializer.init_weights(n_in, n_out)
self.bias = initializer.init_bias(n_out)
self.act_fn = act_fn
def __repr__(self):
return 'Weights: ' + repr(self.weights) + ' Biases: ' + repr(self.bias)
def parameters(self) -> Sequence[Var]:
"""Returns all the vars of the layer (weights + biases) as a single flat list"""
flat_weights = [weight for sublist in self.weights for weight in sublist]
return flat_weights + self.bias
def forward(self, inputs: Sequence[Var]) -> Sequence[Var]:
"""
inputs: A n_in length vector of Var's corresponding to the previous layer outputs or the data if it's the first layer.
Computes the forward pass of the dense layer: For each output neuron, j, it computes: act_fn(weights[i][j]*inputs[i] + bias[j])
Returns a vector of Vars that is n_out long.
"""
assert len(self.weights) == len(inputs), "weights and inputs must match in first dimension"
output: Sequence = []
for j in range(len(self.bias)):
_sum = self.bias[j]
for i in range(len(inputs)):
_sum += self.weights[i][j] * inputs[i]
output.append(self.act_fn(_sum))
return output
###Output
_____no_output_____
###Markdown
Verify that your class is correct by running the code below, and verifying that `actual` is the same as `expected`. Here we define a small 3x2 dense layer with some fixed parameters and use numpy to compute the expected values.
###Code
import numpy as np
np.random.seed(0)
w = np.random.randn(3, 2)
b = np.random.randn(2)
x = np.random.randn(3)
expected = np.tanh(x@w+b)
class FixedInit(Initializer):
"""
An initializer used for debugging that will return the w and b variables defined above regardless of the input and output size.
"""
def init_weights(self, n_in, n_out):
return [list(map(Var, r.tolist())) for r in w]
def init_bias(self, n_out):
return list(map(Var, b.tolist()))
layer = DenseLayer(3, 2, lambda x: x.tanh(), FixedInit())
var_x = list(map(Var, x.tolist()))
actual = layer.forward(var_x)
print(actual)
print(expected)
###Output
[Var(v=0.8935, grad=0.0000), Var(v=0.5275, grad=0.0000)]
[0.89347265 0.52750061]
###Markdown
Exercise f) MLPWe'll now combine multiple DenseLayers into a neural network. We'll define a class to help us with this. We name it Multi-Layer Perceptron (MLP), since in the "old days", a single dense layer neural network was called a perceptron. It takes a list of DenseLayer as input and defines a forward function. The forward function takes a vector of inputs, the data inputs, and return a vector of outputs, the output of the neural network, after being passed through each layer of the network. It also has a parameters function which just returns all the parameters of the layers as a single flat list.Complete the MLP class below.
###Code
class MLP:
def __init__(self, layers: Sequence[DenseLayer]):
self.layers = layers
def parameters(self) -> Sequence[Var]:
""" Returns all the parameters of the layers as a flat list"""
output = []
for layer in self.layers:
output += layer.parameters()
return output
def forward(self, x: Sequence[Var]) -> Sequence[Var]:
"""
Computes the forward pass of the MLP: x = layer(x) for each layer in layers
"""
for layer in self.layers:
x = layer.forward(x)
return x
###Output
_____no_output_____
###Markdown
Exercise g) SGDNow we need code that will perform the stochastic gradient descent. Complete the class below
###Code
class SGD:
def __init__(self, parameters: Sequence[Var], learning_rate: float):
self.parameters = parameters
self.learning_rate = learning_rate
def zero_grad(self):
""" Set the gradient to zero for all parameters """
for param in self.parameters:
param.grad = 0.0
def step(self):
"""Performs a single step of SGD for each parameter: p = p - learning_rate * grad_p """
for param in self.parameters:
param.v -= self.learning_rate * param.grad
###Output
_____no_output_____
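###Markdown
An added sanity check (not part of the exercise): run the SGD class on a single Var to minimize (x - 3)^2; after a few steps x.v should be close to 3. The learning_rate name is also set globally here in case the step() implementation above refers to it directly.
###Code
learning_rate = 0.1
x = Var(0.0)
opt = SGD([x], learning_rate)

for _ in range(50):
    loss = (x - Var(3.0)) ** 2
    opt.zero_grad()
    loss.backward()
    opt.step()

print(x)   # v should be close to 3.0
###Output
_____no_output_____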
###Markdown
Loss functionsWe are only missing a loss function now. We're doing regression so we'll use the L2 loss function $L2(t, y) = (t-y)^2$, where $t$ is the expected output (the target) and $y$ is the output of the neural network.
###Code
def squared_loss(t: Var, y: Var) -> Var:
return (t-y)**2
###Output
_____no_output_____
###Markdown
Backward passNow the magic happens! We get the calculation of the gradients for free. Let's see how it works.
###Code
mlp = MLP([
DenseLayer(1, 5, lambda x: x.tanh()),
DenseLayer(5, 1, lambda x: x)
])
x, t = sample_data()
x = Var(x)
t = Var(t)
y = mlp.forward([x])
loss = squared_loss(t, y[0])
loss.backward()
###Output
_____no_output_____
###Markdown
and the gradients will be calculated:
###Code
for i,layer in enumerate(mlp.layers):
print("layer", i, layer)
###Output
layer 0 Weights: [[Var(v=0.2013, grad=0.9330), Var(v=-0.0170, grad=1.2256), Var(v=0.0649, grad=-0.2560), Var(v=-0.0160, grad=-0.0814), Var(v=0.1272, grad=-1.7129)]] Biases: [Var(v=0.0000, grad=-0.5109), Var(v=0.0000, grad=-0.6711), Var(v=0.0000, grad=0.1402), Var(v=0.0000, grad=0.0446), Var(v=0.0000, grad=0.9380)]
layer 1 Weights: [[Var(v=-0.0962, grad=-2.1342)], [Var(v=-0.1108, grad=0.1883)], [Var(v=0.0234, grad=-0.7154)], [Var(v=0.0074, grad=0.1774)], [Var(v=0.1632, grad=-1.3832)]] Biases: [Var(v=0.0000, grad=6.0634)]
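###Markdown
An added, optional gradient check (not required by the lab): perturb a single parameter of the MLP from the cell above and compare the resulting change in the loss with the gradient that loss.backward() stored.
###Code
def loss_value():
    y = mlp.forward([x])
    return squared_loss(t, y[0]).v

p = mlp.parameters()[0]      # an arbitrary parameter of the network
analytic = p.grad            # filled in by loss.backward() in the previous cell

eps = 1e-6
base = loss_value()
p.v += eps
perturbed = loss_value()
p.v -= eps                   # restore the original value
numeric = (perturbed - base) / eps

print("analytic:", analytic, " numeric:", numeric)   # the two should agree closely
###Output
_____no_output_____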
###Markdown
Exercise h) Putting it all togetherWe are ready to train some neural networks!We'll train the neural network for 100 gradient updates. Each gradient will be calculated on the average loss over a minibatch of samples. Read and understand the code below. Answer the inline comment questions. We'll plot the loss for each batch, which should decrease steadily.
###Code
mlp = MLP([
DenseLayer(1, 16, lambda x: x.tanh()),
DenseLayer(16, 1, lambda x: x)
]) # What does this line do?
# Creates a neural network with 1 input, a hidden layer of 16 neurons, and a single output
# It uses tanh as the activation function for the hidden layer and the identity (linear, no non-linearity) for the output layer
learning_rate = 0.01 # Try different learning rates
optim = SGD(mlp.parameters(), learning_rate) # What does this line do? Initializes the SGD optimizer with the model's parameters and the learning rate
batch_size = 64
losses = []
for i in tqdm.tqdm(range(100)):
loss = Var(0.0)
for _ in range(batch_size): # What does this loop do? Runs through batch_size samples of our training data and accumulates the loss.
x, y_target = random.choice(train_data) # What does this line do? #Samples from the training data
x = Var(x)
y_target = Var(y_target)
y = mlp.forward([x])
loss += squared_loss(y_target, y[0])
loss = loss / Var(batch_size) # What does this line do? #Averages out the loss from the previous loop (mini_batch)
losses.append(loss.v)
optim.zero_grad() # Why do we need to call zero_grad here? # Because gradients accumulate across backward() calls, so we reset them before computing the gradients of a new mini-batch
loss.backward() # What does this line do? # Calculates the gradient with respect to each parameter
optim.step()# What does this line do? # Updates the parameters based on the gradients
plt.plot(losses, '.')
plt.ylabel('L2 loss')
plt.xlabel('Batches')
plt.show()
###Output
100%|โโโโโโโโโโ| 100/100 [00:01<00:00, 66.89it/s]
###Markdown
The plot should look similar to:  Let's also plot the data and what the Neural Network has learned.
###Code
for _ in range(100):
x, y_target = sample_data()
y = mlp.forward([Var(x)])
plt.plot(x, y_target, 'b.')
plt.plot(x, y[0].v, 'r.')
plt.title('True (blue) and MLP approx (red)')
plt.show()
###Output
_____no_output_____
###Markdown
[](https://colab.research.google.com/github/real-itu/modern-ai-course/blob/master/lecture-06/lab.ipynb) Contents and why we need this labThis lab is about implementing neural networks yourself from scratch. All the modern frameworks for deep learning use automatic differentiation (autodiff) so you don't have to code the backward step yourself. In this version of this lab you will develop your own autodif implementation, and use this to build a simple neural network. Once you've done this lab you should have a very good understanding of what goes on below the hood in the modern framework such as [PyTorch](https://pytorch.org/), [TensorFlow](https://www.tensorflow.org/) or [JAX](https://github.com/google/jax). In particular the code we'll develop will look quite similar to the pytorch API. External sources of information1. Jupyter notebook. You can find more information about Jupyter notebooks [here](https://jupyter.org/). It will come as part of the [Anaconda](https://www.anaconda.com/) Python installation. You can also use [colab](colab.to), which is a free online jupyter notebook.3. [Nanograd](https://github.com/rasmusbergpalm/nanograd) is a minimalistic version of autodiff developed by Rasmus Berg Palm that we use for our framework. Nanograd automatic differention framework The [Nanograd](https://github.com/rasmusbergpalm/nanograd) framework defines a class Var which both holds a value and gradient value that we can use to store the intermediate values when we apply the chain rule of differentiation.
###Code
# Copy and pasted from https://github.com/rasmusbergpalm/nanograd/blob/main/nanograd.py
from typing import Union
from math import tanh
class Var:
"""
A variable which holds a number and enables gradient computations.
"""
def __init__(self, val: Union[float, int], parents=None):
assert type(val) in {float, int}
if parents is None:
parents = []
self.v = val
self.parents = parents
self.grad = 0.0
def backprop(self, bp):
self.grad += bp
for parent, grad in self.parents:
parent.backprop(grad * bp)
def backward(self):
self.backprop(1.0)
def __add__(self: 'Var', other: 'Var') -> 'Var':
return Var(self.v + other.v, [(self, 1.0), (other, 1.0)])
def __mul__(self: 'Var', other: 'Var') -> 'Var':
return Var(self.v * other.v, [(self, other.v), (other, self.v)])
def __pow__(self, power: Union[float, int]) -> 'Var':
assert type(power) in {float, int}, "power must be float or int"
return Var(self.v ** power, [(self, power * self.v ** (power - 1))])
def __neg__(self: 'Var') -> 'Var':
return Var(-1.0) * self
def __sub__(self: 'Var', other: 'Var') -> 'Var':
return self + (-other)
def __truediv__(self: 'Var', other: 'Var') -> 'Var':
return self * other ** -1
def tanh(self) -> 'Var':
return Var(tanh(self.v), [(self, 1 - tanh(self.v) ** 2)])
def relu(self) -> 'Var':
return Var(self.v if self.v > 0.0 else 0.0, [(self, 1.0 if self.v > 0.0 else 0.0)])
def __repr__(self):
return "Var(v=%.4f, grad=%.4f)" % (self.v, self.grad)
###Output
_____no_output_____
###Markdown
A few examples illustrate how we can use this:
###Code
a = Var(3.0)
b = Var(5.0)
f = a * b
f.backward()
for v in [a, b, f]:
print(v)
a = Var(3.0)
b = Var(5.0)
c = a * b
d = Var(9.0)
e = a * d
f = c + e
f.backward()
for v in [a, b, c, d, e, f]:
print(v)
###Output
Var(v=3.0000, grad=14.0000)
Var(v=5.0000, grad=3.0000)
Var(v=15.0000, grad=1.0000)
Var(v=9.0000, grad=3.0000)
Var(v=27.0000, grad=1.0000)
Var(v=42.0000, grad=1.0000)
###Markdown
Exercise a) What is being calculated?Explain briefly the output of the code? What is the expression we differentiate and with respect to what variables? Exercise b) How does the backward function work?For the first example above, execute the backward function by hand to convince yourself that it indeed calculates the gradients with respect to the variables. Write down the sequence of calls to backprop for the first example above. Exercise c) What happens if we run backward again?Try to execute the code below. Explain what happens.
###Code
f.backward()
for v in [a, b, c, d, e, f]:
print(v)
###Output
Var(v=3.0000, grad=28.0000)
Var(v=5.0000, grad=6.0000)
Var(v=15.0000, grad=2.0000)
Var(v=9.0000, grad=6.0000)
Var(v=27.0000, grad=2.0000)
Var(v=42.0000, grad=2.0000)
###Markdown
Exercise d) Test correctness of derivatives with the finite difference methodWrite a small function that uses [the finite difference method](https://en.wikipedia.org/wiki/Finite_difference_method) to numerically compute the gradient:$$\frac{\partial f(x)}{\partial x} \approx \frac{f(x+dx)-f(x)}{dx}$$for a very small $dx$.
###Code
def finite_difference(fn, x_val, dx=1e-10):
"""
Computes the finite difference numerical approximation to the derivative of fn(x) with respect to x at x_val: (fn(x_val + dx) - fn(x_val))/dx
"""
f = fn(x_val+dx)
g = fn(x_val)
return (f-g)/dx
###Output
_____no_output_____
###Markdown
Use your finite difference function to compute the gradient of $f$ with respect to $a$ and $b$ in the following function: $f(x) = a \cdot b + b$, at a=3 and b=5.
###Code
# test function - try to change into other functions as well
def f(a, b):
return a*b + b
a = Var(3.0)
b = Var(5.0)
f = a * b + b
for v in [a,b,f]:
print(v)
f.backward()
for v in [a,b,f]:
print(v)
###Output
Var(v=3.0000, grad=0.0000)
Var(v=5.0000, grad=0.0000)
Var(v=20.0000, grad=0.0000)
Var(v=3.0000, grad=5.0000)
Var(v=5.0000, grad=4.0000)
Var(v=20.0000, grad=1.0000)
###Markdown
Write the same function using Nanograd `Var`s and verify that Nanograd computes the same gradients
###Code
def fn(a,b):
return a
print(finite_difference(fn(3,5),3))
###Output
_____no_output_____
###Markdown
Create an artificial dataset to play withWe create a non-linear 1d regression task. The generator supports various noise levels. You can modify it yourself if you want more or less challenging tasks.
###Code
from math import sin
import random
import tqdm as tqdm
import matplotlib.pyplot as plt
def sample_data(noise=0.3):
x = (random.random() - 0.5) * 10
return x, sin(x) + x + random.gauss(0, noise)
train_data = [sample_data() for _ in range(100)]
val_data = [sample_data() for _ in range(100)]
for x, y in train_data:
plt.plot(x, y, 'b.')
plt.show()
###Output
_____no_output_____
###Markdown
Building the neural network.We'll create a feedforward neural network consisting of a series of dense layers. See the image below. Each dense layer is just a number of artificial neurons. In the image below each column of circles (neurons) is a dense layer. It's dense because the weight matrix is dense; there's a connection between every input and every output neuron in the layer.The inputs to create a dense layer is following:1. **The input size and output size**. We have to define the number of inputs and outputs. The inputs are the number of inputs to the layer, and the output size is the number of artificial neurons the layer has.2. **Activation functions**. Each dense layer must have an activation function (it can also be the linear activation which is equivalent to identity function). The power of neural networks comes from non-linear activation functions.3. **Parameter initialization**. We will initialize the weights to have random values. This is done in practice by drawing pseudo random numbers from a Gaussian or uniform distribution. It turns out that for deeper models we have to be careful about how we scale the random numbers. This will be the topic of a later exercice. For now we will just use simple Gaussians. See the `Initializer` class below.Note that we use Sequence in the code below. A Sequence is an ordered list. This means the order we insert and access items are the same. 
###Code
from typing import Sequence
class Initializer:
def init_weights(self, n_in, n_out) -> Sequence[Sequence[Var]]:
raise NotImplementedError
def init_bias(self, n_out) -> Sequence[Var]:
raise NotImplementedError
class NormalInitializer(Initializer):
def __init__(self, mean=0, std=0.1):
self.mean = mean
self.std = std
def init_weights(self, n_in, n_out):
return [[Var(random.gauss(self.mean, self.std)) for _ in range(n_out)] for _ in range(n_in)]
def init_bias(self, n_out):
return [Var(0.0) for _ in range(n_out)]
###Output
_____no_output_____
###Markdown
Exercise e) Dense layerComplete the DenseLayer class below. The dense layer takes an input vector and computes an output vector corresponding to the value of each artificial neuron in the dense layer.
###Code
class DenseLayer:
def __init__(self, n_in: int, n_out: int, act_fn, initializer: Initializer = NormalInitializer()):
"""
n_in: the number of inputs to the layer
n_out: the number of output neurons in the layer
act_fn: the non-linear activation function for each neuron
initializer: The initializer to use to initialize the weights and biases
"""
self.weights = initializer.init_weights(n_in, n_out)
self.bias = initializer.init_bias(n_out)
self.act_fn = act_fn
def __repr__(self):
return 'Weights: ' + repr(self.weights) + ' Biases: ' + repr(self.bias)
def parameters(self) -> Sequence[Var]:
"""Returns all the vars of the layer (weights + biases) as a single flat list"""
toret = []
for i in range(len(self.weights)):
for j in range(len(self.weights[0])):
toret.append(self.weights[i][j])
for i in range(len(self.bias)):
toret.append(self.bias[i])
return toret
def forward(self, inputs: Sequence[Var]) -> Sequence[Var]:
"""
inputs: A n_in length vector of Var's corresponding to the previous layer outputs or the data if it's the first layer.
Computes the forward pass of the dense layer: For each output neuron, j, it computes: act_fn(weights[i][j]*inputs[i] + bias[j])
Returns a vector of Vars that is n_out long.
"""
assert len(self.weights) == len(inputs), "weights and inputs must match in first dimension"
ls = []
added = Var(0.0)
for j in range(len(self.weights[0])):
added = Var(0.0)
added.v = 0.0
for i in range(len(inputs)):
added += self.weights[i][j] * inputs[i]
#ls.append(self.act_fn(self.weights[i][j] * inputs[i] + self.bias[j]))
ls.append(self.act_fn(added + self.bias[j]))
return ls
###Output
_____no_output_____
###Markdown
Verify that your class is correct by running the code below, and verifying that `actual` is the same as `expected`. Here we define a small 3x2 dense layer with some fixed parameters and use numpy to compute the expected values.
###Code
import numpy as np
np.random.seed(0)
w = np.random.randn(3, 2)
print(w)
b = np.random.randn(2)
print(b)
x = np.random.randn(3)
print(x)
expected = np.tanh(x@w+b)
class FixedInit(Initializer):
"""
An initializer used for debugging that will return the w and b variables defined above regardless of the input and output size.
"""
def init_weights(self, n_in, n_out):
return [list(map(Var, r.tolist())) for r in w]
def init_bias(self, n_out):
return list(map(Var, b.tolist()))
layer = DenseLayer(3, 2, lambda x: x.tanh(), FixedInit())
var_x = list(map(Var, x.tolist()))
actual = layer.forward(var_x)
print(actual)
print(expected)
###Output
[[ 1.76405235 0.40015721]
[ 0.97873798 2.2408932 ]
[ 1.86755799 -0.97727788]]
[ 0.95008842 -0.15135721]
[-0.10321885 0.4105985 0.14404357]
[Var(v=0.8935, grad=0.0000), Var(v=0.5275, grad=0.0000)]
[0.89347265 0.52750061]
###Markdown
Exercise f) MLPWe'll now combine multiple DenseLayers into a neural network. We'll define a class to help us with this. We name it Multi-Layer Perceptron (MLP), since in the "old days", a single dense layer neural network was called a perceptron. It takes a list of DenseLayer as input and defines a forward function. The forward function takes a vector of inputs, the data inputs, and return a vector of outputs, the output of the neural network, after being passed through each layer of the network. It also has a parameters function which just returns all the parameters of the layers as a single flat list.Complete the MLP class below.
###Code
class MLP:
def __init__(self, layers: Sequence[DenseLayer]):
self.layers = layers
def parameters(self) -> Sequence[Var]:
""" Returns all the parameters of the layers as a flat list"""
all_param = []
for layer in self.layers:
all_param += layer.parameters()
return all_param
def forward(self, x: Sequence[Var]) -> Sequence[Var]:
"""
Computes the forward pass of the MLP: x = layer(x) for each layer in layers
"""
inp = x
for layer in self.layers:
x = layer.forward(x)
return x
###Output
_____no_output_____
###Markdown
Exercise g) SGDNow we need code that will perform the stochastic gradient descent. Complete the class below
###Code
class SGD:
def __init__(self, parameters: Sequence[Var], learning_rate: float):
self.parameters = parameters
self.learning_rate = learning_rate
def zero_grad(self):
""" Set the gradient to zero for all parameters """
for parameter in self.parameters:
parameter.grad = 0
def step(self):
"""Performs a single step of SGD for each parameter: p = p - learning_rate * grad_p """
for parameter in self.parameters:
parameter.v = parameter.v - self.learning_rate * parameter.grad
###Output
_____no_output_____
###Markdown
Loss functionsWe are only missing a loss function now. We're doing regression so we'll use the L2 loss function $L2(t, y) = (t-y)^2$, where $t$ is the expected output (the target) and $y$ is the output of the neural network.
###Code
def squared_loss(t: Var, y: Var) -> Var:
return (t-y)**2
###Output
_____no_output_____
###Markdown
Backward passNow the magic happens! We get the calculation of the gradients for free. Let's see how it works.
###Code
mlp = MLP([
DenseLayer(1, 5, lambda x: x.tanh()),
DenseLayer(5, 1, lambda x: x)
])
x, t = sample_data()
x = Var(x)
t = Var(t)
y = mlp.forward([x])
loss = squared_loss(t, y[0])
loss.backward()
###Output
_____no_output_____
###Markdown
and the gradients will be calculated:
###Code
for i,layer in enumerate(mlp.layers):
print("layer", i, layer)
###Output
layer 0 Weights: [[Var(v=-0.0014, grad=-0.9652), Var(v=-0.0525, grad=-0.8994), Var(v=-0.1696, grad=-0.4621), Var(v=0.0303, grad=0.1691), Var(v=-0.0462, grad=0.0690)]] Biases: [Var(v=0.0000, grad=0.7676), Var(v=0.0000, grad=0.7153), Var(v=0.0000, grad=0.3675), Var(v=0.0000, grad=-0.1345), Var(v=0.0000, grad=-0.0549)]
layer 1 Weights: [[Var(v=0.1857, grad=0.0074)], [Var(v=0.1738, grad=0.2725)], [Var(v=0.0930, grad=0.8687)], [Var(v=-0.0326, grad=-0.1576)], [Var(v=-0.0133, grad=0.2401)]] Biases: [Var(v=0.0000, grad=4.1341)]
###Markdown
Exercise h) Putting it all togetherWe are ready to train some neural networks!We'll train the neural network for 100 gradient updates. Each gradient will be calculated on the average loss over a minibatch of samples. Read and understand the code below. Answer the inline comment questions. We'll plot the loss for each batch, which should decrease steadily.
###Code
mlp = MLP([
DenseLayer(1, 16, lambda x: x.tanh()),
DenseLayer(16, 1, lambda x: x)
]) # What does this line do?
learning_rate = 0.01 #Try different learning rates
optim = SGD(mlp.parameters(), learning_rate) # What does this line do?
batch_size = 64
losses = []
for i in tqdm.tqdm(range(100)):
loss = Var(0.0)
for _ in range(batch_size): # What does this loop do?
x, y_target = random.choice(train_data) # What does this line do?
x = Var(x)
y_target = Var(y_target)
y = mlp.forward([x])
loss += squared_loss(y_target, y[0])
loss = loss / Var(batch_size) # What does this line do?
losses.append(loss.v)
optim.zero_grad() # Why do we need to call zero_grad here?
loss.backward() # What does this line do?
optim.step()# What does this line do?
plt.plot(losses, '.')
plt.ylabel('L2 loss')
plt.xlabel('Batches')
plt.show()
###Output
100%|โโโโโโโโโโ| 100/100 [00:02<00:00, 45.62it/s]
###Markdown
The plot should look similar to:  Let's also plot the data and what the Neural Network has learned.
###Code
for _ in range(100):
x, y_target = sample_data()
y = mlp.forward([Var(x)])
plt.plot(x, y_target, 'b.')
plt.plot(x, y[0].v, 'r.')
plt.title('True (blue) and MLP approx (red)')
plt.show()
###Output
_____no_output_____
###Markdown
[](https://colab.research.google.com/github/real-itu/modern-ai-course/blob/master/lecture-06/lab.ipynb) Contents and why we need this labThis lab is about implementing neural networks yourself from scratch. All the modern frameworks for deep learning use automatic differentiation (autodiff) so you don't have to code the backward step yourself. In this version of this lab you will develop your own autodif implementation, and use this to build a simple neural network. Once you've done this lab you should have a very good understanding of what goes on below the hood in the modern framework such as [PyTorch](https://pytorch.org/), [TensorFlow](https://www.tensorflow.org/) or [JAX](https://github.com/google/jax). In particular the code we'll develop will look quite similar to the pytorch API. External sources of information1. Jupyter notebook. You can find more information about Jupyter notebooks [here](https://jupyter.org/). It will come as part of the [Anaconda](https://www.anaconda.com/) Python installation. You can also use [colab](colab.to), which is a free online jupyter notebook.3. [Nanograd](https://github.com/rasmusbergpalm/nanograd) is a minimalistic version of autodiff developed by Rasmus Berg Palm that we use for our framework. Nanograd automatic differention framework The [Nanograd](https://github.com/rasmusbergpalm/nanograd) framework defines a class Var which both holds a value and gradient value that we can use to store the intermediate values when we apply the chain rule of differentiation.
###Code
# Copy and pasted from https://github.com/rasmusbergpalm/nanograd/blob/main/nanograd.py
from typing import Union
from math import tanh
class Var:
"""
A variable which holds a number and enables gradient computations.
"""
def __init__(self, val: Union[float, int], parents=None):
assert type(val) in {float, int}
if parents is None:
parents = []
self.v = val
self.parents = parents
self.grad = 0.0
def backprop(self, bp):
self.grad += bp
for parent, grad in self.parents:
parent.backprop(grad * bp)
def backward(self):
self.backprop(1.0)
def __add__(self: 'Var', other: 'Var') -> 'Var':
return Var(self.v + other.v, [(self, 1.0), (other, 1.0)])
def __mul__(self: 'Var', other: 'Var') -> 'Var':
return Var(self.v * other.v, [(self, other.v), (other, self.v)])
def __pow__(self, power: Union[float, int]) -> 'Var':
assert type(power) in {float, int}, "power must be float or int"
return Var(self.v ** power, [(self, power * self.v ** (power - 1))])
def __neg__(self: 'Var') -> 'Var':
return Var(-1.0) * self
def __sub__(self: 'Var', other: 'Var') -> 'Var':
return self + (-other)
def __truediv__(self: 'Var', other: 'Var') -> 'Var':
return self * other ** -1
def tanh(self) -> 'Var':
return Var(tanh(self.v), [(self, 1 - tanh(self.v) ** 2)])
def relu(self) -> 'Var':
return Var(self.v if self.v > 0.0 else 0.0, [(self, 1.0 if self.v > 0.0 else 0.0)])
def __repr__(self):
return "Var(v=%.4f, grad=%.4f)" % (self.v, self.grad)
###Output
_____no_output_____
###Markdown
A few examples illustrate how we can use this:
###Code
a = Var(3.0)
b = Var(5.0)
f = a * b
f.backward()
for v in [a, b, f]:
print(v)
a = Var(3.0)
b = Var(5.0)
c = a * b
d = Var(9.0)
e = a * d
f = c + e
f.backward()
for v in [a, b, c, d, e, f]:
print(v)
###Output
_____no_output_____
###Markdown
Exercise a) What is being calculated?Explain briefly the output of the code? What is the expression we differentiate and with respect to what variables? Exercise b) How does the backward function work?For the first example above, execute the backward function by hand to convince yourself that it indeed calculates the gradients with respect to the variables. Write down the sequence of calls to backprop for the first example above. Exercise c) What happens if we run backward again?Try to execute the code below. Explain what happens.
###Code
f.backward()
for v in [a, b, c, d, e, f]:
print(v)
###Output
_____no_output_____
###Markdown
Exercise d) Test correctness of derivatives with the finite difference methodWrite a small function that uses [the finite difference method](https://en.wikipedia.org/wiki/Finite_difference_method) to numerically compute the gradient:$$\frac{\partial f(x)}{\partial x} \approx \frac{f(x+dx)-f(x)}{dx}$$for a very small $dx$.
###Code
def finite_difference(fn, x_val, dx=1e-10):
"""
Computes the finite difference numerical approximation to the derivative of fn(x) with respect to x at x_val: (fn(x_val + dx) - fn(x_val))/dx
"""
pass #Insert code
###Output
_____no_output_____
###Markdown
Use your finite difference function to compute the gradient of $f$ with respect to $a$ and $b$ in the following function: $f(x) = a \cdot b + b$, at a=3 and b=5.
###Code
# test function - try to change into other functions as well
def f(a, b):
return a*b + b
pass #Insert code
###Output
_____no_output_____
###Markdown
Write the same function using Nanograd `Var`s and verify that Nanograd computes the same gradients
###Code
pass #Insert code
###Output
_____no_output_____
###Markdown
Create an artificial dataset to play withWe create a non-linear 1d regression task. The generator supports various noise levels. You can modify it yourself if you want more or less challenging tasks.
###Code
from math import sin
import random
import tqdm as tqdm
import matplotlib.pyplot as plt
def sample_data(noise=0.3):
x = (random.random() - 0.5) * 10
return x, sin(x) + x + random.gauss(0, noise)
train_data = [sample_data() for _ in range(100)]
val_data = [sample_data() for _ in range(100)]
for x, y in train_data:
plt.plot(x, y, 'b.')
plt.show()
###Output
_____no_output_____
###Markdown
Building the neural network.We'll create a feedforward neural network consisting of a series of dense layers. See the image below. Each dense layer is just a number of artificial neurons. In the image below each column of circles (neurons) is a dense layer. It's dense because the weight matrix is dense; there's a connection between every input and every output neuron in the layer.The inputs to create a dense layer is following:1. **The input size and output size**. We have to define the number of inputs and outputs. The inputs are the number of inputs to the layer, and the output size is the number of artificial neurons the layer has.2. **Activation functions**. Each dense layer must have an activation function (it can also be the linear activation which is equivalent to identity function). The power of neural networks comes from non-linear activation functions.3. **Parameter initialization**. We will initialize the weights to have random values. This is done in practice by drawing pseudo random numbers from a Gaussian or uniform distribution. It turns out that for deeper models we have to be careful about how we scale the random numbers. This will be the topic of a later exercice. For now we will just use simple Gaussians. See the `Initializer` class below.Note that we use Sequence in the code below. A Sequence is an ordered list. This means the order we insert and access items are the same. 
###Code
from typing import Sequence
class Initializer:
def init_weights(self, n_in, n_out) -> Sequence[Sequence[Var]]:
raise NotImplementedError
def init_bias(self, n_out) -> Sequence[Var]:
raise NotImplementedError
class NormalInitializer(Initializer):
def __init__(self, mean=0, std=0.1):
self.mean = mean
self.std = std
def init_weights(self, n_in, n_out):
return [[Var(random.gauss(self.mean, self.std)) for _ in range(n_out)] for _ in range(n_in)]
def init_bias(self, n_out):
return [Var(0.0) for _ in range(n_out)]
###Output
_____no_output_____
###Markdown
Exercise e) Dense layerComplete the DenseLayer class below. The dense layer takes an input vector and computes an output vector corresponding to the value of each artificial neuron in the dense layer.
###Code
class DenseLayer:
def __init__(self, n_in: int, n_out: int, act_fn, initializer: Initializer = NormalInitializer()):
"""
n_in: the number of inputs to the layer
n_out: the number of output neurons in the layer
act_fn: the non-linear activation function for each neuron
initializer: The initializer to use to initialize the weights and biases
"""
self.weights = initializer.init_weights(n_in, n_out)
self.bias = initializer.init_bias(n_out)
self.act_fn = act_fn
def __repr__(self):
return 'Weights: ' + repr(self.weights) + ' Biases: ' + repr(self.bias)
def parameters(self) -> Sequence[Var]:
"""Returns all the vars of the layer (weights + biases) as a single flat list"""
pass #Insert code
def forward(self, inputs: Sequence[Var]) -> Sequence[Var]:
"""
inputs: A n_in length vector of Var's corresponding to the previous layer outputs or the data if it's the first layer.
Computes the forward pass of the dense layer: For each output neuron, j, it computes: act_fn(weights[i][j]*inputs[i] + bias[j])
Returns a vector of Vars that is n_out long.
"""
assert len(self.weights) == len(inputs), "weights and inputs must match in first dimension"
pass #Insert code
###Output
_____no_output_____
###Markdown
Verify that your class is correct by running the code below, and verifying that `actual` is the same as `expected`. Here we define a small 3x2 dense layer with some fixed parameters and use numpy to compute the expected values.
###Code
import numpy as np
np.random.seed(0)
w = np.random.randn(3, 2)
b = np.random.randn(2)
x = np.random.randn(3)
expected = np.tanh(x@w+b)
class FixedInit(Initializer):
"""
An initializer used for debugging that will return the w and b variables defined above regardless of the input and output size.
"""
def init_weights(self, n_in, n_out):
return [list(map(Var, r.tolist())) for r in w]
def init_bias(self, n_out):
return list(map(Var, b.tolist()))
layer = DenseLayer(3, 2, lambda x: x.tanh(), FixedInit())
var_x = list(map(Var, x.tolist()))
actual = layer.forward(var_x)
print(actual)
print(expected)
###Output
_____no_output_____
###Markdown
Exercise f) MLPWe'll now combine multiple DenseLayers into a neural network. We'll define a class to help us with this. We name it Multi-Layer Perceptron (MLP), since in the "old days", a single dense layer neural network was called a perceptron. It takes a list of DenseLayer as input and defines a forward function. The forward function takes a vector of inputs, the data inputs, and return a vector of outputs, the output of the neural network, after being passed through each layer of the network. It also has a parameters function which just returns all the parameters of the layers as a single flat list.Complete the MLP class below.
###Code
class MLP:
def __init__(self, layers: Sequence[DenseLayer]):
self.layers = layers
def parameters(self) -> Sequence[Var]:
""" Returns all the parameters of the layers as a flat list"""
pass #Insert code
def forward(self, x: Sequence[Var]) -> Sequence[Var]:
"""
Computes the forward pass of the MLP: x = layer(x) for each layer in layers
"""
pass #Insert code
###Output
_____no_output_____
###Markdown
Exercise g) SGDNow we need code that will perform the stochastic gradient descent. Complete the class below
###Code
class SGD:
def __init__(self, parameters: Sequence[Var], learning_rate: float):
self.parameters = parameters
self.learning_rate = learning_rate
def zero_grad(self):
""" Set the gradient to zero for all parameters """
pass #Insert code
def step(self):
"""Performs a single step of SGD for each parameter: p = p - learning_rate * grad_p """
pass #Insert code
###Output
_____no_output_____
###Markdown
Loss functionsWe are only missing a loss function now. We're doing regression so we'll use the L2 loss function $L2(t, y) = (t-y)^2$, where $t$ is the expected output (the target) and $y$ is the output of the neural network.
###Code
def squared_loss(t: Var, y: Var) -> Var:
return (t-y)**2
###Output
_____no_output_____
###Markdown
Backward passNow the magic happens! We get the calculation of the gradients for free. Let's see how it works.
###Code
mlp = MLP([
DenseLayer(1, 5, lambda x: x.tanh()),
DenseLayer(5, 1, lambda x: x)
])
x, t = sample_data()
x = Var(x)
t = Var(t)
y = mlp.forward([x])
loss = squared_loss(t, y[0])
loss.backward()
###Output
_____no_output_____
###Markdown
and the gradients will be calculated:
###Code
for i,layer in enumerate(mlp.layers):
print("layer", i, layer)
###Output
_____no_output_____
###Markdown
Exercise h) Putting it all togetherWe are ready to train some neural networks!We'll train the neural network for 100 gradient updates. Each gradient will be calculated on the average loss over a minibatch of samples. Read and understand the code below. Answer the inline comment questions. We'll plot the loss for each batch, which should decrease steadily.
###Code
mlp = MLP([
DenseLayer(1, 16, lambda x: x.tanh()),
DenseLayer(16, 1, lambda x: x)
]) # What does this line do?
learning_rate = 0.01 # Try different learning rates
optim = SGD(mlp.parameters(), learning_rate) # What does this line do?
batch_size = 64
losses = []
for i in tqdm.tqdm(range(100)):
loss = Var(0.0)
for _ in range(batch_size): # What does this loop do?
x, y_target = random.choice(train_data) # What does this line do?
x = Var(x)
y_target = Var(y_target)
y = mlp.forward([x])
loss += squared_loss(y_target, y[0])
loss = loss / Var(batch_size) # What does this line do?
losses.append(loss.v)
optim.zero_grad() # Why do we need to call zero_grad here?
loss.backward() # What does this line do?
optim.step()# What does this line do?
plt.plot(losses, '.')
plt.ylabel('L2 loss')
plt.xlabel('Batches')
plt.show()
###Output
_____no_output_____
###Markdown
The plot should look similar to:  Let's also plot the data and what the Neural Network has learned.
###Code
for _ in range(100):
x, y_target = sample_data()
y = mlp.forward([Var(x)])
plt.plot(x, y_target, 'b.')
plt.plot(x, y[0].v, 'r.')
plt.title('True (blue) and MLP approx (red)')
plt.show()
###Output
_____no_output_____
###Markdown
The plot should look similar to this: 
###Code
###Output
_____no_output_____ |
FashionTrainingInception.ipynb | ###Markdown
Setup directorycloning and merging directories together.
###Code
!git clone https://github.com/aryapei/In-shop-Clothes-From-Deepfashion.git
!rsync -a ./In-shop-Clothes-From-Deepfashion/Img/MEN/ ./In-shop-Clothes-From-Deepfashion/Img/WOMEN/
###Output
Cloning into 'In-shop-Clothes-From-Deepfashion'...
remote: Enumerating objects: 4, done.[K
remote: Counting objects: 100% (4/4), done.[K
remote: Compressing objects: 100% (4/4), done.[K
remote: Total 30676 (delta 0), reused 3 (delta 0), pack-reused 30672[K
Receiving objects: 100% (30676/30676), 397.23 MiB | 36.02 MiB/s, done.
Resolving deltas: 100% (16/16), done.
Checking out files: 100% (26451/26451), done.
###Markdown
Define Neural NetworkCreate a ConvNet instance and remove last layer to implement transfert learning.:warning: do not forget to freeze pretrained model reduce training workload.
###Code
#!/usr/bin/env python3
""" Low Cost Transfert Learning on CIBR with Inceptionv3 ConvNet
Description:
============
see this script as a disappointment to me.
Was hoping to correctly use ~~~InceptionV3~~~ VGG16 model by freezing the layers and fitting data generator to train this ConvNet.
The current script collect extracted features from ~~~InceptionV3~~~ VGG16 and names to write Hierarchical Data Format file.
Required setup:
===============
$ git clone https://github.com/aryapei/In-shop-Clothes-From-Deepfashion.git
$ rsync -a ./In-shop-Clothes-From-Deepfashion/Img/MEN/ ./In-shop-Clothes-From-Deepfashion/Img/WOMEN/
Thoses commands clone and merge current Fashion dataset hosted at https://github.com/aryapei/In-shop-Clothes-From-Deepfashion in the same folder ./In-shop-Clothes-From-Deepfashion/Img/WOMEN/
"""
import numpy as np
from numpy import linalg as LA
import os
import h5py
from tensorflow.keras.applications import InceptionResNetV2
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.inception_resnet_v2 import preprocess_input
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from glob import glob
img_side_size = 299
class ConvNet:
def __init__(self):
self.model = InceptionResNetV2(input_shape=(img_side_size, img_side_size, 3), weights="imagenet", include_top=False, pooling="max")
self.model.predict(np.zeros((1, img_side_size, img_side_size, 3)))
'''
Use inceptionv3 model to extract features
Output normalized feature vector
'''
def extract_feat(self, img_path):
img = image.load_img(img_path, target_size=(img_side_size,img_side_size))
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = preprocess_input(img)
feat = self.model.predict(img)
norm_feat = feat[0]/LA.norm(feat[0])
return norm_feat
if __name__ == "__main__":
img_dir = "/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN"
img_pattern = f"{img_dir}/**/**/*.jpg"
print(img_pattern)
img_list = glob(img_pattern)
print(f"{' feature extraction starts ':=^120}")
feats = []
names = []
model = ConvNet()
img_list_len = len(img_list)
for i, img_path in enumerate(img_list):
norm_feat = model.extract_feat(img_path)
feats.append(norm_feat)
img_name = '/'.join(img_path.split('/')[-5:])
names.append(img_name)
print(f"({i}/{img_list_len}) feat extraction of {img_name}.")
feats = np.array(feats)
names = np.string_(names)
print(f"{' writing feature extraction results ':=^120}")
h5f = h5py.File("featureCNN.h5", 'w')
h5f.create_dataset('dataset_feat', data=feats)
h5f.create_dataset('dataset_name', data=names)
h5f.close()
import numpy as np
from numpy import linalg as LA
import os
import h5py
from tensorflow.keras.applications import InceptionResNetV2
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.inception_resnet_v2 import preprocess_input
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from glob import glob
img_side_size = 299
class ConvNet:
def __init__(self):
self.model = InceptionResNetV2(input_shape=(img_side_size, img_side_size, 3), weights="imagenet", include_top=False, pooling="max")
self.model.predict(np.zeros((1, img_side_size, img_side_size, 3)))
'''
Use inceptionv3 model to extract features
Output normalized feature vector
'''
def extract_feat(self, img_path):
img = image.load_img(img_path, target_size=(img_side_size,img_side_size))
img = image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = preprocess_input(img)
feat = self.model.predict(img)
norm_feat = feat[0]/LA.norm(feat[0])
return norm_feat
# Read the produced files :
h5f = h5py.File('./featureCNN.h5', 'r')
feats = h5f['dataset_feat'][:]
imgNames = h5f['dataset_name'][:]
h5f.close()
print(f"{' searching starts ':=^120}")
queryDir = '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Sweaters/id_00000062/01_1_front.jpg'
queryImg = mpimg.imread(queryDir)
plt.figure()
plt.subplot(2, 1, 1)
plt.imshow(queryImg)
plt.title("Query Image")
plt.axis('off')
model = ConvNet()
queryVec = model.extract_feat(queryDir)
scores = np.dot(queryVec, feats.T)
rank_ID = np.argsort(scores)[::-1]
rank_score = scores[rank_ID]
# number of top retrieved images to show
maxres = 10
local = "/content/In-shop-Clothes-From-Deepfashion/"
distant = "https://raw.githubusercontent.com/aryapei/In-shop-Clothes-From-Deepfashion/master/"
imlist = [f"{local}{imgNames[index].decode('utf-8')}" for i,index in enumerate(rank_ID[0:maxres])]
print("top %d images in order are: " % maxres, imlist)
plt.imshow(queryImg)
plt.title("search input")
plt.axis('off')
plt.show()
for i, im in enumerate(imlist):
image = mpimg.imread(im)
plt.imshow(image)
plt.title("search output %d" % (i + 1))
plt.axis('off')
plt.show()
###Output
=================================================== searching starts ===================================================
top 10 images in order are: ['/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Sweaters/id_00000062/01_1_front.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Jackets_Coats/id_00003844/01_7_additional.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Jackets_Coats/id_00001053/02_2_side.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Dresses/id_00001323/02_7_additional.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Sweaters/id_00000062/01_2_side.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Cardigans/id_00001967/02_7_additional.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Dresses/id_00000825/01_7_additional.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Cardigans/id_00006965/02_2_side.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Sweaters/id_00000062/02_7_additional.jpg', '/content/In-shop-Clothes-From-Deepfashion/Img/WOMEN/Cardigans/id_00004785/01_7_additional.jpg']
|
[20190921]matplotlib.ipynb | ###Markdown
--- Basic Attributesalpha : ํฌ๋ช
๋ kind : ๊ทธ๋ํ ์ข
๋ฅ 'line', 'bar', 'barh', 'kde' logy : Y์ถ์ ๋ํด Log scaling use_index : ๊ฐ์ฒด์ ์์ธ์ ๋๊ธ ์ด๋ฆ์ผ๋ก ์ฌ์ฉํ ์ง ์ฌ๋ถ rot : ๋๊ธ ์ด๋ฆ ๋๋ฆฌ๊ธฐ (rotating) 0 ~ 360 xticks, yticks : x, y์ถ์ผ๋ก ์ฌ์ฉํ ๊ฐ xlim, ylim : X, Y์ถ์ ํ๊ณ grid : ์ถ์ ๊ทธ๋ฆฌ๋๋ฅผ ํํํ ์ง ์ฌ๋ถsubplots : ๊ฐ column์ ๋
๋ฆฝ๋ subplot ๊ทธ๋ฆฌ๊ธฐ sharex, sharey : subplots=True ์ด๋ฉด ๊ฐ์ X,Y์ถ์ ๊ณต์ ํ๊ณ ๋๊ธ๊ณผ ํ๊ณ๋ฅผ ์ฐ๊ฒฐ figsize : ์์ฑ๋ ๊ทธ๋ํ์ ํฌ๊ธฐ๋ฅผ tuple๋ก ์ง์ title : ๊ทธ๋ํ์ ์ ๋ชฉ ์ง์ legend : subplot์ ๋ฒ๋ก ์ง์ sort_columns : column์ ์ํ๋ฒณ ์์๋ก ๊ทธ๋ฆฐ๋ค. ์ ์ ๊ทธ๋ํ
###Code
data = np.random.randn(50).cumsum()
data
plt.plot(data)
plt.show()
###Output
_____no_output_____
###Markdown
์ฌ๋ฌ๊ทธ๋ํ ๊ทธ๋ฆฌ๊ธฐ
###Code
plt.subplot(1,2,1)
plt.subplot(1,2,2)
plt.show()
###Output
_____no_output_____
###Markdown
Multi Graph ๊ทธ๋ฆฌ๊ธฐ
###Code
hist_data = np.random.randn(100)
scat_data = np.arange(30)
plt.subplot(2,2,1)
plt.plot(data)
plt.subplot(2,2,2)
plt.hist(hist_data,bins=20)
plt.subplot(2,1,2)
plt.scatter(scat_data, np.arange(30) + 3 * np.random.randn(30))
plt.show()
###Output
_____no_output_____
###Markdown
๊ทธ๋ํ ์ ์ต์
- ๊ทธ๋ํ๋ฅผ ๊ทธ๋ฆด ๋ ํ์ ๋๋ ์์ด๋ ๋ง์ปค ํจํด์ ๋ฐ๊พธ๋ ๊ฒ ํ์ธ - ์์: b(ํ๋์), g(์ด๋ก์), r(๋นจ๊ฐ์), c(์ฒญ๋ก์), y(๋
ธ๋์), k(๊ฒ์์), w(ํฐ์) - ๋ง์ปค: o(์), v(์ญ์ผ๊ฐํ), ^(์ผ๊ฐํ), s(๋ค๋ชจ), +(ํ๋ฌ์ค), .(์ )
###Code
plt.plot(data, 'y+')
plt.show()
plt.plot(data, 'v')
###Output
_____no_output_____
###Markdown
๊ทธ๋ํ ์ฌ์ด์ฆ ์กฐ์
###Code
plt.figure(figsize=(10,10))
plt.plot(data, 'k+')
plt.show()
###Output
_____no_output_____
###Markdown
๊ทธ๋ํ ๊ฒน์น๊ธฐ + legend ๋ฌ๊ธฐ
###Code
data = np.random.randn(30).cumsum()
plt.plot(data, 'k--', label='Default')
plt.plot(data, 'k-', drawstyle='steps-post', label='steps_post')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
์ด๋ฆ ๋ฌ๊ธฐ
###Code
plt.plot(np.random.randn(1000).cumsum())
plt.title('Random Graph')
plt.xlabel('Stages')
plt.ylabel('Values')
plt.show()
###Output
_____no_output_____
###Markdown
์ ์ฅํ๊ธฐ
###Code
plt.savefig('saved_graph.svg')
###Output
_____no_output_____ |
inference_engine/efficientdet_pytorch/lib/tutorial/train_shape.ipynb | ###Markdown
EfficientDet Training On A Custom Dataset View source on github Run in Google Colab This tutorial will show you how to train a custom dataset. For the sake of simplicity, I generated a dataset of different shapes, like rectangles, triangles, circles. Please enable GPU support to accelerate on notebook setting if you are using colab. 0. Install Requirements
###Code
!pip install pycocotools numpy==1.16.0 opencv-python tqdm tensorboard tensorboardX pyyaml webcolors matplotlib
!pip install torch==1.4.0
!pip install torchvision==0.5.0
###Output
Requirement already satisfied: pycocotools in /usr/local/lib/python3.6/dist-packages (2.0.0)
Collecting numpy==1.16.0
[?25l Downloading https://files.pythonhosted.org/packages/7b/74/54c5f9bb9bd4dae27a61ec1b39076a39d359b3fb7ba15da79ef23858a9d8/numpy-1.16.0-cp36-cp36m-manylinux1_x86_64.whl (17.3MB)
[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ| 17.3MB 215kB/s
[?25hRequirement already satisfied: opencv-python in /usr/local/lib/python3.6/dist-packages (4.1.2.30)
Requirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (4.41.1)
Requirement already satisfied: tensorboard in /usr/local/lib/python3.6/dist-packages (2.2.2)
Collecting tensorboardX
[?25l Downloading https://files.pythonhosted.org/packages/35/f1/5843425495765c8c2dd0784a851a93ef204d314fc87bcc2bbb9f662a3ad1/tensorboardX-2.0-py2.py3-none-any.whl (195kB)
[K |โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ| 204kB 38.5MB/s
[?25hRequirement already satisfied: pyyaml in /usr/local/lib/python3.6/dist-packages (3.13)
Collecting webcolors
Downloading https://files.pythonhosted.org/packages/12/05/3350559de9714b202e443a9e6312937341bd5f79f4e4f625744295e7dd17/webcolors-1.11.1-py3-none-any.whl
Requirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (3.2.1)
Requirement already satisfied: wheel>=0.26; python_version >= "3" in /usr/local/lib/python3.6/dist-packages (from tensorboard) (0.34.2)
Requirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (2.23.0)
Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (1.6.0.post3)
Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (47.1.1)
Requirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (1.12.0)
Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (0.4.1)
Requirement already satisfied: protobuf>=3.6.0 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (3.10.0)
Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (1.0.1)
Requirement already satisfied: absl-py>=0.4 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (0.9.0)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (3.2.2)
Requirement already satisfied: grpcio>=1.24.3 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (1.29.0)
Requirement already satisfied: google-auth<2,>=1.6.3 in /usr/local/lib/python3.6/dist-packages (from tensorboard) (1.17.2)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib) (0.10.0)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib) (2.4.7)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib) (1.2.0)
Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib) (2.8.1)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard) (2.9)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard) (2020.4.5.2)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard) (1.24.3)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests<3,>=2.21.0->tensorboard) (3.0.4)
Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.6/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard) (1.3.0)
Requirement already satisfied: importlib-metadata; python_version < "3.8" in /usr/local/lib/python3.6/dist-packages (from markdown>=2.6.8->tensorboard) (1.6.1)
Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard) (0.2.8)
Requirement already satisfied: rsa<5,>=3.1.4; python_version >= "3" in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard) (4.6)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from google-auth<2,>=1.6.3->tensorboard) (4.1.0)
Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard) (3.1.0)
Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata; python_version < "3.8"->markdown>=2.6.8->tensorboard) (3.1.0)
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.6/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2,>=1.6.3->tensorboard) (0.4.8)
[31mERROR: umap-learn 0.4.4 has requirement numpy>=1.17, but you'll have numpy 1.16.0 which is incompatible.[0m
[31mERROR: datascience 0.10.6 has requirement folium==0.2.1, but you'll have folium 0.8.3 which is incompatible.[0m
[31mERROR: albumentations 0.1.12 has requirement imgaug<0.2.7,>=0.2.5, but you'll have imgaug 0.2.9 which is incompatible.[0m
Installing collected packages: numpy, tensorboardX, webcolors
Found existing installation: numpy 1.18.5
Uninstalling numpy-1.18.5:
Successfully uninstalled numpy-1.18.5
Successfully installed numpy-1.16.0 tensorboardX-2.0 webcolors-1.11.1
###Markdown
1. Prepare Custom Dataset/Pretrained Weights (Skip this part if you already have datasets and weights of your own)
###Code
import os
import sys
if "projects" not in os.getcwd():
!git clone --depth 1 https://github.com/zylo117/Yet-Another-EfficientDet-Pytorch
os.chdir('Yet-Another-EfficientDet-Pytorch')
sys.path.append('.')
else:
!git pull
# download and unzip dataset
! mkdir datasets
! wget https://github.com/zylo117/Yet-Another-EfficientDet-Pytorch/releases/download/1.1/dataset_shape.tar.gz
! tar xzf dataset_shape.tar.gz
# download pretrained weights
! mkdir weights
! wget https://github.com/zylo117/Yet-Another-EfficientDet-Pytorch/releases/download/1.0/efficientdet-d0.pth -O weights/efficientdet-d0.pth
# prepare project file projects/shape.yml
# showing its contents here
! cat projects/shape.yml
###Output
Cloning into 'Yet-Another-EfficientDet-Pytorch'...
remote: Enumerating objects: 43, done.[K
remote: Counting objects: 100% (43/43), done.[K
remote: Compressing objects: 100% (39/39), done.[K
remote: Total 43 (delta 3), reused 22 (delta 1), pack-reused 0[K
Unpacking objects: 100% (43/43), done.
--2020-06-18 02:41:28-- https://github.com/zylo117/Yet-Another-EfficientDet-Pytorch/releases/download/1.1/dataset_shape.tar.gz
Resolving github.com (github.com)... 140.82.118.4
Connecting to github.com (github.com)|140.82.118.4|:443... connected.
HTTP request sent, awaiting response... 302 Found
Location: https://github-production-release-asset-2e65be.s3.amazonaws.com/253385242/b4de2a00-7e55-11ea-89ac-50cd8071e6ce?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20200618%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20200618T024128Z&X-Amz-Expires=300&X-Amz-Signature=2584a0dac8cf892da56cdf5d4845131e4346c765c3b6afae35879931b65f4e4e&X-Amz-SignedHeaders=host&actor_id=0&repo_id=253385242&response-content-disposition=attachment%3B%20filename%3Ddataset_shape.tar.gz&response-content-type=application%2Foctet-stream [following]
--2020-06-18 02:41:28-- https://github-production-release-asset-2e65be.s3.amazonaws.com/253385242/b4de2a00-7e55-11ea-89ac-50cd8071e6ce?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20200618%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20200618T024128Z&X-Amz-Expires=300&X-Amz-Signature=2584a0dac8cf892da56cdf5d4845131e4346c765c3b6afae35879931b65f4e4e&X-Amz-SignedHeaders=host&actor_id=0&repo_id=253385242&response-content-disposition=attachment%3B%20filename%3Ddataset_shape.tar.gz&response-content-type=application%2Foctet-stream
Resolving github-production-release-asset-2e65be.s3.amazonaws.com (github-production-release-asset-2e65be.s3.amazonaws.com)... 52.217.37.92
Connecting to github-production-release-asset-2e65be.s3.amazonaws.com (github-production-release-asset-2e65be.s3.amazonaws.com)|52.217.37.92|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 5770263 (5.5M) [application/octet-stream]
Saving to: โdataset_shape.tar.gzโ
dataset_shape.tar.g 100%[===================>] 5.50M 7.61MB/s in 0.7s
2020-06-18 02:41:29 (7.61 MB/s) - โdataset_shape.tar.gzโ saved [5770263/5770263]
--2020-06-18 02:41:34-- https://github.com/zylo117/Yet-Another-EfficientDet-Pytorch/releases/download/1.0/efficientdet-d0.pth
Resolving github.com (github.com)... 140.82.118.4
Connecting to github.com (github.com)|140.82.118.4|:443... connected.
HTTP request sent, awaiting response... 302 Found
Location: https://github-production-release-asset-2e65be.s3.amazonaws.com/253385242/9b9d2100-791d-11ea-80b2-d35899cf95fe?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20200618%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20200618T024135Z&X-Amz-Expires=300&X-Amz-Signature=c4d613ce694cbb959c9b5bec39f9e7ae9e57e90262ffee0f8d7c8c847fa1f4e5&X-Amz-SignedHeaders=host&actor_id=0&repo_id=253385242&response-content-disposition=attachment%3B%20filename%3Defficientdet-d0.pth&response-content-type=application%2Foctet-stream [following]
--2020-06-18 02:41:35-- https://github-production-release-asset-2e65be.s3.amazonaws.com/253385242/9b9d2100-791d-11ea-80b2-d35899cf95fe?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20200618%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20200618T024135Z&X-Amz-Expires=300&X-Amz-Signature=c4d613ce694cbb959c9b5bec39f9e7ae9e57e90262ffee0f8d7c8c847fa1f4e5&X-Amz-SignedHeaders=host&actor_id=0&repo_id=253385242&response-content-disposition=attachment%3B%20filename%3Defficientdet-d0.pth&response-content-type=application%2Foctet-stream
Resolving github-production-release-asset-2e65be.s3.amazonaws.com (github-production-release-asset-2e65be.s3.amazonaws.com)... 52.216.82.216
Connecting to github-production-release-asset-2e65be.s3.amazonaws.com (github-production-release-asset-2e65be.s3.amazonaws.com)|52.216.82.216|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 15862583 (15M) [application/octet-stream]
Saving to: โweights/efficientdet-d0.pthโ
weights/efficientde 100%[===================>] 15.13M 15.1MB/s in 1.0s
2020-06-18 02:41:36 (15.1 MB/s) - โweights/efficientdet-d0.pthโ saved [15862583/15862583]
project_name: shape # also the folder name of the dataset that under data_path folder
train_set: train
val_set: val
num_gpus: 1
# mean and std in RGB order, actually this part should remain unchanged as long as your dataset is similar to coco.
mean: [0.485, 0.456, 0.406]
std: [0.229, 0.224, 0.225]
# this anchor is adapted to the dataset
anchors_scales: '[2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]'
anchors_ratios: '[(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)]'
obj_list: ['rectangle', 'circle']
###Markdown
2. Training
###Code
# consider this is a simple dataset, train head will be enough.
! python train.py -c 0 -p shape --head_only True --lr 1e-3 --batch_size 32 --load_weights weights/efficientdet-d0.pth --num_epochs 50
# the loss will be high at first
# don't panic, be patient,
# just wait for a little bit longer
###Output
loading annotations into memory...
Done (t=0.02s)
creating index...
index created!
loading annotations into memory...
Done (t=0.00s)
creating index...
index created!
[Warning] Ignoring Error(s) in loading state_dict for EfficientDetBackbone:
size mismatch for classifier.header.pointwise_conv.conv.weight: copying a param with shape torch.Size([810, 64, 1, 1]) from checkpoint, the shape in current model is torch.Size([18, 64, 1, 1]).
size mismatch for classifier.header.pointwise_conv.conv.bias: copying a param with shape torch.Size([810]) from checkpoint, the shape in current model is torch.Size([18]).
[Warning] Don't panic if you see this, this might be because you load a pretrained weights with different number of classes. The rest of the weights should be loaded already.
[Info] loaded weights: efficientdet-d0.pth, resuming checkpoint from step: 0
[Info] freezed backbone
Step: 27. Epoch: 0/50. Iteration: 28/28. Cls loss: 26.29772. Reg loss: 0.01289. Total loss: 26.31061: 100% 28/28 [00:46<00:00, 1.66s/it]
Val. Epoch: 0/50. Classification loss: 12.20426. Regression loss: 0.01610. Total loss: 12.22037
Step: 55. Epoch: 1/50. Iteration: 28/28. Cls loss: 3.66639. Reg loss: 0.01443. Total loss: 3.68082: 100% 28/28 [00:46<00:00, 1.65s/it]
Val. Epoch: 1/50. Classification loss: 3.10739. Regression loss: 0.01396. Total loss: 3.12135
Step: 83. Epoch: 2/50. Iteration: 28/28. Cls loss: 2.61804. Reg loss: 0.01078. Total loss: 2.62881: 100% 28/28 [00:46<00:00, 1.66s/it]
Val. Epoch: 2/50. Classification loss: 1.99466. Regression loss: 0.01278. Total loss: 2.00744
Step: 111. Epoch: 3/50. Iteration: 28/28. Cls loss: 1.44927. Reg loss: 0.01206. Total loss: 1.46133: 100% 28/28 [00:46<00:00, 1.66s/it]
Val. Epoch: 3/50. Classification loss: 1.42343. Regression loss: 0.01165. Total loss: 1.43508
Step: 139. Epoch: 4/50. Iteration: 28/28. Cls loss: 1.44247. Reg loss: 0.01195. Total loss: 1.45442: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 4/50. Classification loss: 1.15894. Regression loss: 0.01040. Total loss: 1.16934
Step: 167. Epoch: 5/50. Iteration: 28/28. Cls loss: 0.96989. Reg loss: 0.01074. Total loss: 0.98064: 100% 28/28 [00:46<00:00, 1.66s/it]
Val. Epoch: 5/50. Classification loss: 0.94637. Regression loss: 0.00966. Total loss: 0.95603
Step: 195. Epoch: 6/50. Iteration: 28/28. Cls loss: 0.90316. Reg loss: 0.00981. Total loss: 0.91297: 100% 28/28 [00:46<00:00, 1.66s/it]
Val. Epoch: 6/50. Classification loss: 0.80626. Regression loss: 0.00944. Total loss: 0.81570
Step: 223. Epoch: 7/50. Iteration: 28/28. Cls loss: 0.83105. Reg loss: 0.01052. Total loss: 0.84157: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 7/50. Classification loss: 0.69999. Regression loss: 0.00907. Total loss: 0.70907
Step: 251. Epoch: 8/50. Iteration: 28/28. Cls loss: 0.68107. Reg loss: 0.01090. Total loss: 0.69197: 100% 28/28 [00:46<00:00, 1.68s/it]
Val. Epoch: 8/50. Classification loss: 0.62273. Regression loss: 0.00883. Total loss: 0.63156
Step: 279. Epoch: 9/50. Iteration: 28/28. Cls loss: 0.63515. Reg loss: 0.01228. Total loss: 0.64743: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 9/50. Classification loss: 0.55948. Regression loss: 0.00851. Total loss: 0.56798
Step: 307. Epoch: 10/50. Iteration: 28/28. Cls loss: 0.50954. Reg loss: 0.01053. Total loss: 0.52007: 100% 28/28 [00:47<00:00, 1.68s/it]
Val. Epoch: 10/50. Classification loss: 0.50945. Regression loss: 0.00836. Total loss: 0.51781
Step: 335. Epoch: 11/50. Iteration: 28/28. Cls loss: 0.52033. Reg loss: 0.00733. Total loss: 0.52766: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 11/50. Classification loss: 0.46788. Regression loss: 0.00800. Total loss: 0.47587
Step: 363. Epoch: 12/50. Iteration: 28/28. Cls loss: 0.49584. Reg loss: 0.00927. Total loss: 0.50511: 100% 28/28 [00:47<00:00, 1.68s/it]
Val. Epoch: 12/50. Classification loss: 0.43143. Regression loss: 0.00792. Total loss: 0.43935
Step: 391. Epoch: 13/50. Iteration: 28/28. Cls loss: 0.45326. Reg loss: 0.00893. Total loss: 0.46219: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 13/50. Classification loss: 0.40211. Regression loss: 0.00764. Total loss: 0.40974
Step: 419. Epoch: 14/50. Iteration: 28/28. Cls loss: 0.40421. Reg loss: 0.00882. Total loss: 0.41303: 100% 28/28 [00:47<00:00, 1.68s/it]
Val. Epoch: 14/50. Classification loss: 0.37800. Regression loss: 0.00736. Total loss: 0.38537
Step: 447. Epoch: 15/50. Iteration: 28/28. Cls loss: 0.38576. Reg loss: 0.00615. Total loss: 0.39191: 100% 28/28 [00:47<00:00, 1.68s/it]
Val. Epoch: 15/50. Classification loss: 0.35435. Regression loss: 0.00746. Total loss: 0.36181
Step: 475. Epoch: 16/50. Iteration: 28/28. Cls loss: 0.38551. Reg loss: 0.01182. Total loss: 0.39733: 100% 28/28 [00:46<00:00, 1.68s/it]
Val. Epoch: 16/50. Classification loss: 0.33601. Regression loss: 0.00737. Total loss: 0.34338
Step: 499. Epoch: 17/50. Iteration: 24/28. Cls loss: 0.35644. Reg loss: 0.00668. Total loss: 0.36312: 82% 23/28 [00:41<00:05, 1.15s/it]checkpoint...
Step: 503. Epoch: 17/50. Iteration: 28/28. Cls loss: 0.35166. Reg loss: 0.00812. Total loss: 0.35978: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 17/50. Classification loss: 0.31798. Regression loss: 0.00725. Total loss: 0.32523
Step: 531. Epoch: 18/50. Iteration: 28/28. Cls loss: 0.35137. Reg loss: 0.01101. Total loss: 0.36238: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 18/50. Classification loss: 0.30364. Regression loss: 0.00718. Total loss: 0.31082
Step: 559. Epoch: 19/50. Iteration: 28/28. Cls loss: 0.29872. Reg loss: 0.00653. Total loss: 0.30525: 100% 28/28 [00:46<00:00, 1.68s/it]
Val. Epoch: 19/50. Classification loss: 0.29044. Regression loss: 0.00733. Total loss: 0.29776
Step: 587. Epoch: 20/50. Iteration: 28/28. Cls loss: 0.30086. Reg loss: 0.00810. Total loss: 0.30896: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 20/50. Classification loss: 0.27783. Regression loss: 0.00728. Total loss: 0.28511
Step: 615. Epoch: 21/50. Iteration: 28/28. Cls loss: 0.34610. Reg loss: 0.00809. Total loss: 0.35419: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 21/50. Classification loss: 0.26462. Regression loss: 0.00711. Total loss: 0.27173
Step: 643. Epoch: 22/50. Iteration: 28/28. Cls loss: 0.28175. Reg loss: 0.00807. Total loss: 0.28981: 100% 28/28 [00:46<00:00, 1.68s/it]
Val. Epoch: 22/50. Classification loss: 0.25356. Regression loss: 0.00716. Total loss: 0.26071
Step: 671. Epoch: 23/50. Iteration: 28/28. Cls loss: 0.27373. Reg loss: 0.00875. Total loss: 0.28248: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 23/50. Classification loss: 0.24350. Regression loss: 0.00737. Total loss: 0.25087
Step: 699. Epoch: 24/50. Iteration: 28/28. Cls loss: 0.25727. Reg loss: 0.00815. Total loss: 0.26542: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 24/50. Classification loss: 0.23465. Regression loss: 0.00712. Total loss: 0.24177
Step: 727. Epoch: 25/50. Iteration: 28/28. Cls loss: 0.23017. Reg loss: 0.01109. Total loss: 0.24125: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 25/50. Classification loss: 0.22561. Regression loss: 0.00716. Total loss: 0.23277
Step: 755. Epoch: 26/50. Iteration: 28/28. Cls loss: 0.22237. Reg loss: 0.00591. Total loss: 0.22828: 100% 28/28 [00:46<00:00, 1.68s/it]
Val. Epoch: 26/50. Classification loss: 0.21848. Regression loss: 0.00694. Total loss: 0.22542
Step: 783. Epoch: 27/50. Iteration: 28/28. Cls loss: 0.25054. Reg loss: 0.00917. Total loss: 0.25971: 100% 28/28 [00:46<00:00, 1.68s/it]
Val. Epoch: 27/50. Classification loss: 0.21120. Regression loss: 0.00699. Total loss: 0.21819
Step: 811. Epoch: 28/50. Iteration: 28/28. Cls loss: 0.22907. Reg loss: 0.00829. Total loss: 0.23737: 100% 28/28 [00:47<00:00, 1.68s/it]
Val. Epoch: 28/50. Classification loss: 0.20494. Regression loss: 0.00701. Total loss: 0.21195
Step: 839. Epoch: 29/50. Iteration: 28/28. Cls loss: 0.26674. Reg loss: 0.00852. Total loss: 0.27526: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 29/50. Classification loss: 0.19854. Regression loss: 0.00670. Total loss: 0.20523
Step: 867. Epoch: 30/50. Iteration: 28/28. Cls loss: 0.19063. Reg loss: 0.00593. Total loss: 0.19656: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 30/50. Classification loss: 0.19303. Regression loss: 0.00679. Total loss: 0.19982
Step: 895. Epoch: 31/50. Iteration: 28/28. Cls loss: 0.23191. Reg loss: 0.00678. Total loss: 0.23869: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 31/50. Classification loss: 0.18698. Regression loss: 0.00675. Total loss: 0.19373
Step: 923. Epoch: 32/50. Iteration: 28/28. Cls loss: 0.18452. Reg loss: 0.00685. Total loss: 0.19137: 100% 28/28 [00:46<00:00, 1.68s/it]
Val. Epoch: 32/50. Classification loss: 0.18236. Regression loss: 0.00679. Total loss: 0.18915
Step: 951. Epoch: 33/50. Iteration: 28/28. Cls loss: 0.20275. Reg loss: 0.00758. Total loss: 0.21033: 100% 28/28 [00:47<00:00, 1.68s/it]
Val. Epoch: 33/50. Classification loss: 0.17713. Regression loss: 0.00692. Total loss: 0.18405
Step: 979. Epoch: 34/50. Iteration: 28/28. Cls loss: 0.18318. Reg loss: 0.00577. Total loss: 0.18895: 100% 28/28 [00:47<00:00, 1.68s/it]
Val. Epoch: 34/50. Classification loss: 0.17203. Regression loss: 0.00657. Total loss: 0.17860
Step: 999. Epoch: 35/50. Iteration: 20/28. Cls loss: 0.18499. Reg loss: 0.00838. Total loss: 0.19337: 68% 19/28 [00:37<00:10, 1.17s/it]checkpoint...
Step: 1007. Epoch: 35/50. Iteration: 28/28. Cls loss: 0.18154. Reg loss: 0.00630. Total loss: 0.18784: 100% 28/28 [00:46<00:00, 1.68s/it]
Val. Epoch: 35/50. Classification loss: 0.16700. Regression loss: 0.00666. Total loss: 0.17366
Step: 1035. Epoch: 36/50. Iteration: 28/28. Cls loss: 0.18250. Reg loss: 0.00611. Total loss: 0.18861: 100% 28/28 [00:47<00:00, 1.68s/it]
Val. Epoch: 36/50. Classification loss: 0.16309. Regression loss: 0.00679. Total loss: 0.16989
Step: 1063. Epoch: 37/50. Iteration: 28/28. Cls loss: 0.15622. Reg loss: 0.00623. Total loss: 0.16245: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 37/50. Classification loss: 0.15933. Regression loss: 0.00666. Total loss: 0.16599
Step: 1091. Epoch: 38/50. Iteration: 28/28. Cls loss: 0.14960. Reg loss: 0.00556. Total loss: 0.15515: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 38/50. Classification loss: 0.15517. Regression loss: 0.00683. Total loss: 0.16201
Step: 1119. Epoch: 39/50. Iteration: 28/28. Cls loss: 0.17928. Reg loss: 0.00657. Total loss: 0.18585: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 39/50. Classification loss: 0.15171. Regression loss: 0.00657. Total loss: 0.15828
Step: 1147. Epoch: 40/50. Iteration: 28/28. Cls loss: 0.17436. Reg loss: 0.00468. Total loss: 0.17904: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 40/50. Classification loss: 0.14942. Regression loss: 0.00667. Total loss: 0.15609
Step: 1175. Epoch: 41/50. Iteration: 28/28. Cls loss: 0.16362. Reg loss: 0.00781. Total loss: 0.17143: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 41/50. Classification loss: 0.14597. Regression loss: 0.00686. Total loss: 0.15283
Step: 1203. Epoch: 42/50. Iteration: 28/28. Cls loss: 0.17241. Reg loss: 0.00837. Total loss: 0.18078: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 42/50. Classification loss: 0.14308. Regression loss: 0.00662. Total loss: 0.14969
Step: 1231. Epoch: 43/50. Iteration: 28/28. Cls loss: 0.17507. Reg loss: 0.00802. Total loss: 0.18309: 100% 28/28 [00:46<00:00, 1.68s/it]
Val. Epoch: 43/50. Classification loss: 0.13933. Regression loss: 0.00666. Total loss: 0.14599
Step: 1259. Epoch: 44/50. Iteration: 28/28. Cls loss: 0.17234. Reg loss: 0.00580. Total loss: 0.17814: 100% 28/28 [00:46<00:00, 1.68s/it]
Val. Epoch: 44/50. Classification loss: 0.13601. Regression loss: 0.00647. Total loss: 0.14247
Step: 1287. Epoch: 45/50. Iteration: 28/28. Cls loss: 0.16627. Reg loss: 0.00595. Total loss: 0.17222: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 45/50. Classification loss: 0.13402. Regression loss: 0.00653. Total loss: 0.14055
Step: 1315. Epoch: 46/50. Iteration: 28/28. Cls loss: 0.17035. Reg loss: 0.00682. Total loss: 0.17717: 100% 28/28 [00:46<00:00, 1.68s/it]
Val. Epoch: 46/50. Classification loss: 0.13196. Regression loss: 0.00638. Total loss: 0.13834
Step: 1343. Epoch: 47/50. Iteration: 28/28. Cls loss: 0.12934. Reg loss: 0.00527. Total loss: 0.13461: 100% 28/28 [00:46<00:00, 1.68s/it]
Val. Epoch: 47/50. Classification loss: 0.12878. Regression loss: 0.00664. Total loss: 0.13542
Step: 1371. Epoch: 48/50. Iteration: 28/28. Cls loss: 0.12199. Reg loss: 0.00390. Total loss: 0.12589: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 48/50. Classification loss: 0.12630. Regression loss: 0.00681. Total loss: 0.13311
Step: 1399. Epoch: 49/50. Iteration: 28/28. Cls loss: 0.13337. Reg loss: 0.00523. Total loss: 0.13859: 100% 28/28 [00:46<00:00, 1.67s/it]
Val. Epoch: 49/50. Classification loss: 0.12423. Regression loss: 0.00635. Total loss: 0.13058
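###Markdown
The per-epoch validation losses above are easy to eyeball, but a quick plot makes the trend clearer. Below is a minimal sketch that parses the `Val. Epoch` lines from a saved copy of this log; the log format is assumed to match the output above, and `train.log` is a placeholder path.
###Code
import re
import matplotlib.pyplot as plt

# Matches lines such as:
# "Val. Epoch: 17/50. Classification loss: 0.31798. Regression loss: 0.00725. Total loss: 0.32523"
val_pattern = re.compile(
    r'Val\. Epoch: (\d+)/\d+\. Classification loss: ([\d.]+)\. '
    r'Regression loss: ([\d.]+)\. Total loss: ([\d.]+)')

epochs, totals = [], []
with open('train.log') as f:  # placeholder: a file containing the log shown above
    for line in f:
        m = val_pattern.search(line)
        if m:
            epochs.append(int(m.group(1)))
            totals.append(float(m.group(4)))

plt.plot(epochs, totals, marker='o')
plt.xlabel('epoch')
plt.ylabel('validation total loss')
plt.title('EfficientDet-d0 validation loss')
plt.show()
###Output
_____no_output_____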
###Markdown
3. Evaluation
###Code
! python coco_eval.py -c 0 -p shape -w logs/shape/efficientdet-d0_49_1400.pth
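# Flag meanings as assumed from the repo's coco_eval.py:
#   -p  project name (config under projects/), -c  compound coefficient (d0),
#   -w  path to the checkpoint to evaluate.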
###Output
running coco-style evaluation on project shape, weights logs/shape/efficientdet-d0_49_1400.pth...
loading annotations into memory...
Done (t=0.00s)
creating index...
index created!
100% 100/100 [00:08<00:00, 11.80it/s]
Loading and preparing results...
DONE (t=0.63s)
creating index...
index created!
BBox
Running per image evaluation...
Evaluate annotation type *bbox*
DONE (t=1.46s).
Accumulating evaluation results...
DONE (t=0.14s).
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.781
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.947
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.868
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = -1.000
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.794
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.740
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.470
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.841
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.843
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = -1.000
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.850
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.819
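###Markdown
The table above is the standard COCO-style bbox summary. A rough sketch of how such a summary is produced with `pycocotools` follows; the file paths are placeholders, and the repo's `coco_eval.py` additionally runs the detector first to generate the results JSON.
###Code
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

# Ground-truth annotations and detector outputs in COCO format (placeholder paths).
coco_gt = COCO('datasets/shape/annotations/instances_val.json')
coco_dt = coco_gt.loadRes('val_bbox_results.json')

coco_eval = COCOeval(coco_gt, coco_dt, iouType='bbox')
coco_eval.evaluate()    # per-image, per-category matching
coco_eval.accumulate()  # aggregate precision/recall over IoU thresholds
coco_eval.summarize()   # prints the 12-line AP/AR table shown above
###Output
_____no_output_____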
###Markdown
4. Visualize
###Code
import torch
from torch.backends import cudnn
from backbone import EfficientDetBackbone
import cv2
import matplotlib.pyplot as plt
import numpy as np
from efficientdet.utils import BBoxTransform, ClipBoxes
from utils.utils import preprocess, invert_affine, postprocess
compound_coef = 0
force_input_size = None # set None to use default size
img_path = 'datasets/shape/val/999.jpg'
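# The next two values are (as assumed from the repo's postprocess utility):
#   threshold     -- minimum classification score for a detection to be kept,
#   iou_threshold -- IoU used by NMS when suppressing overlapping boxes.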
threshold = 0.2
iou_threshold = 0.2
use_cuda = True
use_float16 = False
cudnn.fastest = True
cudnn.benchmark = True
obj_list = ['rectangle', 'circle']
# tf bilinear interpolation is different from any other's, just make do
input_sizes = [512, 640, 768, 896, 1024, 1280, 1280, 1536]
input_size = input_sizes[compound_coef] if force_input_size is None else force_input_size
ori_imgs, framed_imgs, framed_metas = preprocess(img_path, max_size=input_size)
if use_cuda:
    x = torch.stack([torch.from_numpy(fi).cuda() for fi in framed_imgs], 0)
else:
    x = torch.stack([torch.from_numpy(fi) for fi in framed_imgs], 0)

# NHWC -> NCHW, cast to the requested precision
x = x.to(torch.float32 if not use_float16 else torch.float16).permute(0, 3, 1, 2)

model = EfficientDetBackbone(compound_coef=compound_coef, num_classes=len(obj_list),
                             # replace this part with your project's anchor config
                             ratios=[(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)],
                             scales=[2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])
model.load_state_dict(torch.load('logs/shape/efficientdet-d0_49_1400.pth'))
model.requires_grad_(False)
model.eval()

if use_cuda:
    model = model.cuda()
if use_float16:
    model = model.half()

with torch.no_grad():
    features, regression, classification, anchors = model(x)

    regressBoxes = BBoxTransform()
    clipBoxes = ClipBoxes()

    out = postprocess(x,
                      anchors, regression, classification,
                      regressBoxes, clipBoxes,
                      threshold, iou_threshold)

# map the boxes back from the padded/resized frame to the original image
out = invert_affine(framed_metas, out)

for i in range(len(ori_imgs)):
    if len(out[i]['rois']) == 0:
        continue

    for j in range(len(out[i]['rois'])):
        (x1, y1, x2, y2) = out[i]['rois'][j].astype(int)  # np.int is deprecated; plain int works
        cv2.rectangle(ori_imgs[i], (x1, y1), (x2, y2), (255, 255, 0), 2)
        obj = obj_list[out[i]['class_ids'][j]]
        score = float(out[i]['scores'][j])

        cv2.putText(ori_imgs[i], '{}, {:.3f}'.format(obj, score),
                    (x1, y1 + 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (255, 255, 0), 1)

    plt.imshow(ori_imgs[i])
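    # Optionally persist the annotated image as well. The repo's `preprocess`
    # appears to return RGB arrays (plt.imshow above displays them directly),
    # so convert to BGR before writing with OpenCV -- verify this assumption
    # on your own data before relying on it:
    # cv2.imwrite('vis_{}.jpg'.format(i), cv2.cvtColor(ori_imgs[i], cv2.COLOR_RGB2BGR))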
###Output
_____no_output_____
get_effechecka_data.ipynb | ###Markdown
Lists of Species by Country

This code uses the effechecka API to get a list of taxa that have been reported in each country. The API takes a polygon (points are lat/lon coordinates) and returns observations within that polygon from several species occurrence databases. To use this notebook, you need a list of geonames ids and a json file with geonames polygons. In the cell below, we import the necessary libraries and data files. The input file, test_country.txt, contains the geonames IDs of the countries to query; only two are included at any given time, which helps to avoid overloading the server. The code is written so that more countries can be included if, in the future, the server can handle more queries at once. The file low_res_countries.json contains the geonames polygons, reduced in resolution so they fit in the URL of the API call.
###Code
import urllib.request
import urllib.error
import json
in_file = open('test_country.txt', 'r')
shape_file = open('low_res_countries.json','r')
shapes = json.load(shape_file)
###Output
_____no_output_____
###Markdown
The code below takes the country-shaped polygon and forms the URL to query the API. Each query takes two hits: the first gets effechecka started on the query, and the second (done a day later) grabs the results. If the query has been submitted before, the results come back on the first request and the second hit is not needed. The checklist results returned by the API are written to the out_files, one file per country; a short sketch for reading these files back follows after the query cell below.
###Code
out_files = ['output1.tsv','output2.tsv','output3.tsv']
# all of the code below reads the input json and forms the URL for the API query
for index, line in enumerate(in_file):
    line = line.strip()
    row = line.split('\t')
    geonamesid = row[2]
    iso = row[1]
    print(geonamesid)  # print the id so you know what country you are on
    country = row[0]
    polygons = shapes['features']
    for polygon in polygons:
        geoid = polygon['properties']['geoNameId']
        if geonamesid == geoid:  # use the geonames id to find the right polygon in the shapes file
            shape_type = polygon['geometry']['type']
            if shape_type == 'Polygon':  # some country shapes are multiple polygons and need a different procedure
                p = []
                wkt = polygon['geometry']['coordinates'][0]
                for i in wkt:
                    z = []
                    lat = i[1]
                    lon = i[0]
                    z.append(str(lon))
                    z.append(str(lat))
                    m = '%20'.join(z)
                    p.append(str(m))
                q = '%2C%20'.join(p)
                url = 'http://api.effechecka.org/checklist.tsv?traitSelector=&wktString=POLYGON((' + str(q) + '))'
                z = 'POLYGON((' + str(q) + '))'
            elif shape_type == 'MultiPolygon':
                q = ''
                url = 'http://api.effechecka.org/checklist.tsv?traitSelector=&wktString=GEOMETRYCOLLECTION%28POLYGON%20%28%28'
                wkt = polygon['geometry']['coordinates']
                for k in wkt:
                    k = k[0]
                    if len(k) == 0:  # the process of shortening the polygons left a lot of blank coordinates; they get removed here
                        continue
                    p = []
                    for i in k:
                        z = []
                        for j in i:
                            z.append(str(j))
                        m = '%20'.join(z)
                        p.append(str(m))
                    q = q + '%2C%20'.join(p) + '%29%29%2CPOLYGON%20%28%28'
                # drop the trailing separator added by the loop above
                # (str.strip() would remove individual characters, not the whole suffix)
                suffix = '%2CPOLYGON%20%28%28'
                if q.endswith(suffix):
                    q = q[:-len(suffix)]
                url = url + q + '%29'
                z = 'GEOMETRYCOLLECTION%28POLYGON%20%28%28' + q + '%29'
            print(url)
            # This is where the url is submitted to the API and the results are read
            try:
                urllib.request.urlretrieve(url, out_files[index])
            except urllib.error.URLError as e:
                print(e.reason)
            with open(out_files[index], 'a') as u:
                u.write('\ncountry\t' + country + '\n')
                u.write('country_uri\t' + geonamesid + '\n')
                u.write('polygon\t' + z + '\n')

print('complete')  # make sure the code gets to the end
###Output
99237
http://api.effechecka.org/checklist.tsv?traitSelector=&wktString=GEOMETRYCOLLECTION%28POLYGON%20%28%2848.567%2029.916%2C%2048.21%2030.033%2C%2047.95%2030.061%2C%2047.709%2030.096%2C%2047.181%2030.026%2C%2046.556%2029.103%2C%2044.72%2029.206%2C%2043.611%2030.022%2C%2041.444%2031.378%2C%2039.203%2032.158%2C%2039.26%2032.356%2C%2038.986%2032.478%2C%2038.796%2033.368%2C%2041.238%2034.785%2C%2041.283%2035.486%2C%2041.381%2035.835%2C%2041.295%2036.356%2C%2041.828%2036.593%2C%2042.364%2037.109%2C%2042.799%2037.377%2C%2043.167%2037.374%2C%2043.8%2037.23%2C%2044.03%2037.325%2C%2044.279%2037.236%2C%2044.202%2037.098%2C%2044.351%2037.049%2C%2044.771%2037.167%2C%2044.921%2037.02%2C%2044.861%2036.784%2C%2045.072%2036.691%2C%2045.108%2036.419%2C%2045.284%2036.383%2C%2045.279%2036.253%2C%2045.387%2036.085%2C%2045.557%2036.001%2C%2046.093%2035.861%2C%2046.349%2035.809%2C%2046.013%2035.678%2C%2046.156%2035.287%2C%2046.203%2035.198%2C%2046.189%2035.108%2C%2046.064%2035.036%2C%2045.883%2035.031%2C%2045.799%2034.91%2C%2045.701%2034.812%2C%2045.748%2034.542%2C%2045.532%2034.492%2C%2045.503%2034.327%2C%2045.588%2034.303%2C%2045.413%2033.972%2C%2045.777%2033.623%2C%2045.907%2033.626%2C%2045.871%2033.491%2C%2046.051%2033.374%2C%2046.205%2033.18%2C%2046.05%2033.121%2C%2046.102%2032.97%2C%2046.737%2032.762%2C%2047.177%2032.452%2C%2047.449%2032.401%2C%2047.567%2032.224%2C%2047.648%2032.084%2C%2047.699%2031.4%2C%2048.031%2030.994%2C%2048.17%2030.423%2C%2048.267%2030.336%2C%2048.383%2030.127%2C%2048.567%2029.916%29%29%29
Service Unavailable
2963597
http://api.effechecka.org/checklist.tsv?traitSelector=&wktString=POLYGON((-8.063%2055.379%2C%20-7.22%2055.46%2C%20-6.923%2055.237%2C%20-8.096%2054.438%2C%20-7.551%2054.125%2C%20-7.253%2054.203%2C%20-7.042%2054.361%2C%20-6.108%2054.007%2C%20-6.053%2053.729%2C%20-5.91%2053.37%2C%20-6.002%2052.967%2C%20-6.075%2052.476%2C%20-6.119%2052.14%2C%20-6.481%2052.066%2C%20-6.932%2052.059%2C%20-7.426%2051.923%2C%20-8.041%2051.713%2C%20-8.459%2051.529%2C%20-8.997%2051.399%2C%20-9.667%2051.316%2C%20-10.272%2051.522%2C%20-10.722%2051.883%2C%20-10.722%2052.153%2C%20-9.941%2052.561%2C%20-9.84375%2053.021%2C%20-10.173%2053.291%2C%20-10.404%2053.572%2C%20-10.404%2053.93%2C%20-10.25%2054.29%2C%20-9.92%2054.399%2C%20-8.833%2054.367%2C%20-8.975%2054.622%2C%20-8.8%2054.908%2C%20-8.481%2055.272%2C%20-8.063%2055.379))
Service Unavailable
294640
http://api.effechecka.org/checklist.tsv?traitSelector=&wktString=GEOMETRYCOLLECTION%28POLYGON%20%28%2835.033%2029.631%2C%2034.856%2029.739%2C%2034.547%2030.4%2C%2034.27%2031.217%2C%2034.39%2031.394%2C%2034.491%2031.597%2C%2034.963%2032.821%2C%2035.115%2033.09%2C%2035.534%2033.121%2C%2035.625%2033.248%2C%2035.847%2033.2%2C%2035.87%2033.056%2C%2035.756%2032.726%2C%2035.568%2032.39%2C%2035.405%2032.508%2C%2035.076%2032.468%2C%2035.006%2032.028%2C%2035.057%2031.852%2C%2035.243%2031.751%2C%2034.89%2031.377%2C%2035.384%2031.486%2C%2035.39%2031.244%2C%2035.415%2030.949%2C%2035.202%2030.575%2C%2035.192%2030.347%2C%2035.176%2030.119%2C%2035.033%2029.631%29%29%29
Service Unavailable
complete
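###Markdown
The checklist files written above are plain TSV. Below is a minimal sketch for reading one back, dropping the `country`/`country_uri`/`polygon` metadata lines appended by the query loop; the exact column layout of the effechecka checklist is not assumed, so inspect the header row of your own file first.
###Code
import csv

def read_checklist(path):
    """Return the header and data rows of an effechecka checklist TSV,
    skipping the metadata lines appended by the query loop above."""
    meta_keys = {'country', 'country_uri', 'polygon'}
    with open(path) as f:
        rows = [r for r in csv.reader(f, delimiter='\t') if r]
    header, data = rows[0], rows[1:]
    data = [r for r in data if r[0] not in meta_keys]
    return header, data

# one of the out_files written above
header, taxa = read_checklist('output1.tsv')
###Output
_____no_output_____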
0a_Minimum_working_example.ipynb | ###Markdown
OUTDATED, the examples moved to the gallery

See https://empymod.github.io/emg3d-gallery

----

Minimum working example

This is a simple minimum working example to get started, along the lines of the one given in https://emg3d.readthedocs.io/en/stable/usage.html#example. To see some more realistic models have a look at the other notebooks in this repo.

-------------------------------------------------------------------------------

This notebook uses `discretize` to create meshes easily and plot the model as well as the resulting electric field, which also requires `matplotlib`. If you are interested in a basic example that only requires `emg3d`, here it is:

```py
import emg3d
import numpy as np

# Create a simple grid, 8 cells of length 1 in each direction, starting at the origin.
grid = emg3d.utils.TensorMesh(
    [np.ones(8), np.ones(8), np.ones(8)], x0=np.array([0, 0, 0]))

# The model is a fullspace with tri-axial anisotropy.
model = emg3d.utils.Model(grid, res_x=1.5, res_y=1.8, res_z=3.3)

# The source is an x-directed, horizontal dipole at (4, 4, 4), frequency is 10 Hz.
sfield = emg3d.utils.get_source_field(grid, src=[4, 4, 4, 0, 0], freq=10.0)

# Calculate the electric signal.
efield = emg3d.solver.solver(grid, model, sfield, verb=3)

# Get the corresponding magnetic signal.
hfield = emg3d.utils.get_h_field(grid, model, efield)
```

-------------------------------------------------------------------------------

**Requires**

- **emg3d >= 0.9.1**
- discretize, matplotlib

First, we load `emg3d` and `discretize` (to create a mesh), along with `numpy` and `matplotlib`:
###Code
import emg3d
import discretize
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
%matplotlib notebook
plt.style.use('ggplot')
###Output
_____no_output_____
###Markdown
1. Mesh

First, we define the mesh (see `discretize.TensorMesh` for more info). In reality, this task requires some careful considerations. E.g., to avoid edge effects, the mesh should be large enough in order for the fields to dissipate, yet fine enough around source and receiver to accurately model them. This grid is too small, but serves as a minimal example.
###Code
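# Tuples follow discretize's (cell width, number of cells, stretching factor)
# convention: the middle entry is the uniform core, the outer tuples add padding
# cells that grow by the given factor per cell (a negative factor flips the
# growth direction so the padding widens toward the negative boundary), and
# x0='CCC' centres the mesh in all three dimensions.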
grid = discretize.TensorMesh(
[[(25, 10, -1.04), (25, 28), (25, 10, 1.04)],
[(50, 8, -1.03), (50, 16), (50, 8, 1.03)],
[(30, 8, -1.05), (30, 16), (30, 8, 1.05)]],
x0='CCC')
grid
###Output
_____no_output_____
###Markdown
2. Model

Next we define a very simple fullspace model with $\rho_x=1.5\,\Omega\rm{m}$, $\rho_y=1.8\,\Omega\rm{m}$, and $\rho_z=3.3\,\Omega\rm{m}$.
###Code
model = emg3d.utils.Model(grid, res_x=1.5, res_y=1.8, res_z=3.3)
###Output
_____no_output_____
###Markdown
We can plot the model using `discretize`; in this case it is obviously a rather boring plot, as it shows a homogeneous fullspace.
###Code
grid.plot_3d_slicer(np.ones(grid.vnC)*model.res_x) # x-resistivity
###Output
_____no_output_____
###Markdown
3. Source field

The source is an x-directed dipole at the origin, with a 10 Hz signal of 1 A (`src` is defined either as `[x, y, z, dip, azimuth]` or `[x0, x1, y0, y1, z0, z1]`; the strength can be set via the `strength` parameter).
###Code
sfield = emg3d.utils.get_source_field(grid, src=[0, 0, 0, 0, 0], freq=10)
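# A finite-length bipole could be defined with the six-entry form mentioned above,
# e.g. (coordinates are arbitrary and the strength value is only illustrative):
# sfield_bipole = emg3d.utils.get_source_field(
#     grid, src=[-50, 50, 0, 0, 0, 0], freq=10, strength=1.0)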
###Output
_____no_output_____
###Markdown
4. Calculate the electric field

Now we can calculate the electric field with `emg3d`:
###Code
efield = emg3d.solver.solver(grid, model, sfield, verb=3)
###Output
* WARNING :: ``emg3d.solver.solver()`` is renamed to ``emg3d.solve()``.
Use the new ``emg3d.solve()``, as ``solver()`` will be
removed in the future.
:: emg3d START :: 07:31:17 :: v0.9.3.dev17
MG-cycle : 'F' sslsolver : False
semicoarsening : False [0] tol : 1e-06
linerelaxation : False [0] maxit : 50
nu_{i,1,c,2} : 0, 2, 1, 2 verb : 3
Original grid : 48 x 32 x 32 => 49,152 cells
Coarsest grid : 3 x 2 x 2 => 12 cells
Coarsest level : 4 ; 4 ; 4
[hh:mm:ss] rel. error [abs. error, last/prev] l s
h_
2h_ \ /
4h_ \ /\ /
8h_ \ /\ / \ /
16h_ \/\/ \/ \/
[07:31:18] 2.623e-02 after 1 F-cycles [1.464e-06, 0.026] 0 0
[07:31:18] 2.253e-03 after 2 F-cycles [1.258e-07, 0.086] 0 0
[07:31:18] 3.051e-04 after 3 F-cycles [1.704e-08, 0.135] 0 0
[07:31:19] 5.500e-05 after 4 F-cycles [3.071e-09, 0.180] 0 0
[07:31:19] 1.170e-05 after 5 F-cycles [6.531e-10, 0.213] 0 0
[07:31:19] 2.745e-06 after 6 F-cycles [1.532e-10, 0.235] 0 0
[07:31:20] 6.873e-07 after 7 F-cycles [3.837e-11, 0.250] 0 0
> CONVERGED
> MG cycles : 7
> Final rel. error : 6.873e-07
:: emg3d END :: 07:31:20 :: runtime = 0:00:02
###Markdown
The calculation requires in this case seven multigrid F-cycles and took just a few seconds. It was able to coarsen four times in each dimension: the input grid had 49,152 cells, and the coarsest grid had 12 cells.

5. Plot the result

We can again utilize the in-built functions of a `discretize`-grid to plot, e.g., the x-directed electric field.
###Code
grid.plot_3d_slicer(efield.fx.ravel('F'), view='abs', vType='Ex', pcolorOpts={'norm': LogNorm()})
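# The other field components can be plotted the same way, e.g. efield.fy with
# vType='Ey', or efield.fz with vType='Ez'.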
emg3d.Report(discretize)
###Output
_____no_output_____